@@ -607,19 +607,21 @@ void virtqueue_disable_cb(struct virtqueue *_vq)
 EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
 
 /**
- * virtqueue_enable_cb - restart callbacks after disable_cb.
+ * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
  * @vq: the struct virtqueue we're talking about.
  *
- * This re-enables callbacks; it returns "false" if there are pending
- * buffers in the queue, to detect a possible race between the driver
- * checking for more work, and enabling callbacks.
+ * This re-enables callbacks; it returns current queue state
+ * in an opaque unsigned value. This value should be later tested by
+ * virtqueue_poll, to detect a possible race between the driver checking for
+ * more work, and enabling callbacks.
  *
  * Caller must ensure we don't call this with other virtqueue
  * operations at the same time (except where noted).
  */
-bool virtqueue_enable_cb(struct virtqueue *_vq)
+unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
+	u16 last_used_idx;
 
 	START_USE(vq);
 
@@ -629,15 +631,45 @@ bool virtqueue_enable_cb(struct virtqueue *_vq)
 	 * either clear the flags bit or point the event index at the next
 	 * entry. Always do both to keep code simple. */
 	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
-	vring_used_event(&vq->vring) = vq->last_used_idx;
+	vring_used_event(&vq->vring) = last_used_idx = vq->last_used_idx;
+	END_USE(vq);
+	return last_used_idx;
+}
+EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
+
+/**
+ * virtqueue_poll - query pending used buffers
+ * @vq: the struct virtqueue we're talking about.
+ * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
+ *
+ * Returns "true" if there are pending used buffers in the queue.
+ *
+ * This does not need to be serialized.
+ */
+bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
+{
+	struct vring_virtqueue *vq = to_vvq(_vq);
+
 	virtio_mb(vq->weak_barriers);
-	if (unlikely(more_used(vq))) {
-		END_USE(vq);
-		return false;
-	}
+	return (u16)last_used_idx != vq->vring.used->idx;
+}
+EXPORT_SYMBOL_GPL(virtqueue_poll);
 
-	END_USE(vq);
-	return true;
+/**
+ * virtqueue_enable_cb - restart callbacks after disable_cb.
+ * @vq: the struct virtqueue we're talking about.
+ *
+ * This re-enables callbacks; it returns "false" if there are pending
+ * buffers in the queue, to detect a possible race between the driver
+ * checking for more work, and enabling callbacks.
+ *
+ * Caller must ensure we don't call this with other virtqueue
+ * operations at the same time (except where noted).
+ */
+bool virtqueue_enable_cb(struct virtqueue *_vq)
+{
+	unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);
+	return !virtqueue_poll(_vq, last_used_idx);
 }
 EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
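
The hunk above splits the old virtqueue_enable_cb() into two halves: virtqueue_enable_cb_prepare(), which re-arms callbacks and returns an opaque snapshot of the used index, and virtqueue_poll(), which compares that snapshot against the ring's current used index and, as its comment notes, needs no serialization. A minimal driver-side sketch of the intended usage follows; only the virtqueue_*() calls are the real API, while struct my_dev, its lock, and the drain loop are hypothetical scaffolding for illustration.

#include <linux/spinlock.h>
#include <linux/virtio.h>

/* Hypothetical driver state, for illustration only. */
struct my_dev {
	struct virtqueue *vq;
	spinlock_t lock;
};

static void my_dev_service_vq(struct my_dev *dev)
{
	unsigned last_used_idx;
	unsigned int len;
	void *buf;

	spin_lock(&dev->lock);
again:
	/* Drain completed buffers while callbacks are still disabled. */
	while ((buf = virtqueue_get_buf(dev->vq, &len)) != NULL) {
		/* ... consume buf of length len ... */
	}

	/* Re-enable callbacks and snapshot the queue state. */
	last_used_idx = virtqueue_enable_cb_prepare(dev->vq);

	/*
	 * Close the race: buffers that became used after the last
	 * virtqueue_get_buf() but before callbacks were re-enabled may
	 * never raise an interrupt, so disable callbacks again and go
	 * back to draining.  Since virtqueue_poll() needs no
	 * serialization, a busy-polling driver may also call it after
	 * dropping the lock.
	 */
	if (virtqueue_poll(dev->vq, last_used_idx)) {
		virtqueue_disable_cb(dev->vq);
		goto again;
	}
	spin_unlock(&dev->lock);
}

The new virtqueue_enable_cb() wrapper in the hunk performs exactly this prepare-then-poll sequence internally, minus the driver-specific draining, so existing callers keep the old "false means pending buffers" semantics.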