@ -1448,108 +1448,14 @@ void drbd_bump_write_ordering(struct drbd_resource *resource, struct drbd_backin
drbd_info ( resource , " Method to ensure write ordering: %s \n " , write_ordering_str [ resource - > write_ordering ] ) ;
}
/*
 * We *may* ignore the discard-zeroes-data setting, if so configured.
 *
 * Assumption is that "discard_zeroes_data=0" is only because the backend
 * may ignore partial unaligned discards.
 *
 * LVM/DM thin as of at least
 *   LVM version:     2.02.115(2)-RHEL7 (2015-01-28)
 *   Library version: 1.02.93-RHEL7 (2015-01-28)
 *   Driver version:  4.29.0
 * still behaves this way.
 *
 * For unaligned (wrt. alignment and granularity) or too small discards,
 * we zero-out the initial (and/or) trailing unaligned partial chunks,
 * but discard all the aligned full chunks.
 *
 * At least for LVM/DM thin, the result is effectively "discard_zeroes_data=1".
 */
/*
 * drbd_issue_discard_or_zero_out() - discard aligned chunks, zero out the rest
 * @device:     DRBD device whose local backing device receives the I/O
 * @start:      first sector of the range
 * @nr_sectors: length of the range in 512-byte sectors
 * @discard:    if false, unconditionally zero out the whole range
 *
 * Only full, properly aligned discard-granularity chunks are discarded;
 * any unaligned head and/or tail of the range is explicitly zeroed out,
 * so the whole range reads back as zeroes afterwards (effectively
 * "discard_zeroes_data=1", see the comment above).
 *
 * Returns 0 on success, 1 if any submitted request failed.
 */
int drbd_issue_discard_or_zero_out(struct drbd_device *device, sector_t start, unsigned int nr_sectors, bool discard)
{
	struct block_device *bdev = device->ldev->backing_bdev;
	struct request_queue *q = bdev_get_queue(bdev);
	sector_t tmp, nr;
	unsigned int max_discard_sectors, granularity;
	int alignment;
	int err = 0;

	if (!discard)
		goto zero_out;

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(q->limits.discard_granularity >> 9, 1U);
	alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;

	/* Cap at 1 << 22 sectors (2 GiB), rounded down to a multiple of the
	 * granularity so we never submit a trailing partial chunk. */
	max_discard_sectors = min(q->limits.max_discard_sectors, (1U << 22));
	max_discard_sectors -= max_discard_sectors % granularity;
	if (unlikely(!max_discard_sectors))
		goto zero_out;

	if (nr_sectors < granularity)
		goto zero_out;

	tmp = start;
	if (sector_div(tmp, granularity) != alignment) {
		if (nr_sectors < 2*granularity)
			goto zero_out;
		/* start + gran - (start + gran - align) % gran */
		tmp = start + granularity - alignment;
		tmp = start + granularity - sector_div(tmp, granularity);

		nr = tmp - start;
		/* Zero the unaligned head so it reads back as zeroes, too. */
		err |= blkdev_issue_zeroout(bdev, start, nr, GFP_NOIO,
				BLKDEV_ZERO_NOUNMAP);
		nr_sectors -= nr;
		start = tmp;
	}
	while (nr_sectors >= granularity) {
		nr = min_t(sector_t, nr_sectors, max_discard_sectors);
		/* BUGFIX: blkdev_issue_discard() takes BLKDEV_DISCARD_* flags
		 * in its last argument; BLKDEV_ZERO_NOUNMAP belongs to
		 * blkdev_issue_zeroout() and must not be passed here. */
		err |= blkdev_issue_discard(bdev, start, nr, GFP_NOIO, 0);
		nr_sectors -= nr;
		start += nr;
	}
zero_out:
	if (nr_sectors) {
		/* Zero the (remaining) unaligned tail, or the whole range
		 * when discards were skipped entirely. */
		err |= blkdev_issue_zeroout(bdev, start, nr_sectors, GFP_NOIO,
				BLKDEV_ZERO_NOUNMAP);
	}
	return err != 0;
}
static bool can_do_reliable_discards ( struct drbd_device * device )
{
struct request_queue * q = bdev_get_queue ( device - > ldev - > backing_bdev ) ;
struct disk_conf * dc ;
bool can_do ;
if ( ! blk_queue_discard ( q ) )
return false ;
if ( q - > limits . discard_zeroes_data )
return true ;
rcu_read_lock ( ) ;
dc = rcu_dereference ( device - > ldev - > disk_conf ) ;
can_do = dc - > discard_zeroes_if_aligned ;
rcu_read_unlock ( ) ;
return can_do ;
}
static void drbd_issue_peer_discard ( struct drbd_device * device , struct drbd_peer_request * peer_req )
{
/* If the backend cannot discard, or does not guarantee
* read - back zeroes in discarded ranges , we fall back to
* zero - out . Unless configuration specifically requested
* otherwise . */
if ( ! can_do_reliable_discards ( device ) )
peer_req - > flags | = EE_IS_TRIM_USE_ZEROOUT ;
struct block_device * bdev = device - > ldev - > backing_bdev ;
if ( dr bd_issue_discard_or_ zero_ out( devic e , peer_req - > i . sector ,
peer_req - > i . size > > 9 , ! ( peer_req - > flags & EE_IS_TRIM_USE_ZEROOUT ) ) )
if ( blkdev_issue_zeroout ( bdev , peer_req - > i . sector , peer_req - > i . size > > 9 ,
GFP_NOIO , 0 ) )
peer_req - > flags | = EE_WAS_ERROR ;
drbd_endio_write_sec_final ( peer_req ) ;
}