@@ -1562,13 +1562,14 @@ static int nvme_trans_send_fw_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 			res = PTR_ERR(iod);
 			goto out;
 		}
-		length = nvme_setup_prps(dev, &c.common, iod, tot_len,
-								GFP_KERNEL);
+		length = nvme_setup_prps(dev, iod, tot_len, GFP_KERNEL);
 		if (length != tot_len) {
 			res = -ENOMEM;
 			goto out_unmap;
 		}
 
+		c.dlfw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+		c.dlfw.prp2 = cpu_to_le64(iod->first_dma);
 		c.dlfw.numd = cpu_to_le32((tot_len/BYTES_TO_DWORDS) - 1);
 		c.dlfw.offset = cpu_to_le32(offset/BYTES_TO_DWORDS);
 	} else if (opcode == nvme_admin_activate_fw) {
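For reference: nvme_setup_prps() no longer writes the PRP entries into the command, so each converted caller copies them out of the iod once the PRP list is built, exactly as the added c.dlfw.prp1/prp2 lines above do. A minimal sketch of that caller-side pattern; the helper name fill_cmd_prps() is hypothetical and not part of the patch:

    /*
     * Hypothetical helper illustrating the new convention: PRP1 is the
     * DMA address of the first scatterlist segment, PRP2 the second
     * entry or chained PRP-list address that nvme_setup_prps() leaves
     * in iod->first_dma.
     */
    static void fill_cmd_prps(struct nvme_command *c, struct nvme_iod *iod)
    {
            c->common.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
            c->common.prp2 = cpu_to_le64(iod->first_dma);
    }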
@@ -2033,7 +2034,6 @@ static int nvme_trans_do_nvme_io(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 	int res = SNTI_TRANSLATION_SUCCESS;
 	int nvme_sc;
 	struct nvme_dev *dev = ns->dev;
-	struct nvme_queue *nvmeq;
 	u32 num_cmds;
 	struct nvme_iod *iod;
 	u64 unit_len;
@@ -2045,7 +2045,7 @@ static int nvme_trans_do_nvme_io(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 	struct nvme_command c;
 	u8 opcode = (is_write ? nvme_cmd_write : nvme_cmd_read);
 	u16 control;
-	u32 max_blocks = nvme_block_nr(ns, dev->max_hw_sectors);
+	u32 max_blocks = queue_max_hw_sectors(ns->queue);
 
 	num_cmds = nvme_trans_io_get_num_cmds(hdr, cdb_info, max_blocks);
 
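A unit note on this hunk: nvme_block_nr() converted a 512-byte-sector count into namespace logical blocks, whereas queue_max_hw_sectors() reports the limit in 512-byte sectors directly. For reference, the conversion the old code relied on, assuming the driver's usual definition of the helper:

    /* Assumed definition: convert 512-byte sectors to logical blocks by
     * shifting out the extra bits of the namespace block size. */
    static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
    {
            return sector >> (ns->lba_shift - 9);
    }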
@@ -2093,8 +2093,7 @@ static int nvme_trans_do_nvme_io(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 			res = PTR_ERR(iod);
 			goto out;
 		}
-		retcode = nvme_setup_prps(dev, &c.common, iod, unit_len,
-								GFP_KERNEL);
+		retcode = nvme_setup_prps(dev, iod, unit_len, GFP_KERNEL);
 		if (retcode != unit_len) {
 			nvme_unmap_user_pages(dev,
 				(is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
@@ -2103,21 +2102,12 @@ static int nvme_trans_do_nvme_io(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 			res = -ENOMEM;
 			goto out;
 		}
+		c.rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+		c.rw.prp2 = cpu_to_le64(iod->first_dma);
 
 		nvme_offset += unit_num_blocks;
 
-		nvmeq = get_nvmeq(dev);
-		/*
-		 * Since nvme_submit_sync_cmd sleeps, we can't keep
-		 * preemption disabled.  We may be preempted at any
-		 * point, and be rescheduled to a different CPU.  That
-		 * will cause cacheline bouncing, but no additional
-		 * races since q_lock already protects against other
-		 * CPUs.
-		 */
-		put_nvmeq(nvmeq);
-		nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL,
-						NVME_IO_TIMEOUT);
+		nvme_sc = nvme_submit_io_cmd(dev, &c, NULL);
 		if (nvme_sc != NVME_SC_SUCCESS) {
 			nvme_unmap_user_pages(dev,
 				(is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
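The open-coded get_nvmeq()/put_nvmeq()/nvme_submit_sync_cmd() sequence removed here (and in the hunks below) is folded into nvme_submit_io_cmd(), whose definition lies outside this diff. A plausible sketch of such a wrapper, reconstructed from the removed call sequence; the actual implementation may differ:

    /*
     * Sketch only, mirroring the removed open-coded sequence: look up a
     * per-CPU I/O queue, drop the reference (the synchronous submit may
     * sleep, as the deleted comment explained), then submit with the
     * I/O timeout.
     */
    static int nvme_submit_io_cmd(struct nvme_dev *dev,
                                  struct nvme_command *cmd, u32 *result)
    {
            struct nvme_queue *nvmeq = get_nvmeq(dev);

            put_nvmeq(nvmeq);
            return nvme_submit_sync_cmd(nvmeq, cmd, result, NVME_IO_TIMEOUT);
    }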
@@ -2644,7 +2634,6 @@ static int nvme_trans_start_stop(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 {
 	int res = SNTI_TRANSLATION_SUCCESS;
 	int nvme_sc;
-	struct nvme_queue *nvmeq;
 	struct nvme_command c;
 	u8 immed, pcmod, pc, no_flush, start;
 
@@ -2671,10 +2660,7 @@ static int nvme_trans_start_stop(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 		c.common.opcode = nvme_cmd_flush;
 		c.common.nsid = cpu_to_le32(ns->ns_id);
 
-		nvmeq = get_nvmeq(ns->dev);
-		put_nvmeq(nvmeq);
-		nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
-
+		nvme_sc = nvme_submit_io_cmd(ns->dev, &c, NULL);
 		res = nvme_trans_status_code(hdr, nvme_sc);
 		if (res)
 			goto out;
@@ -2697,15 +2683,12 @@ static int nvme_trans_synchronize_cache(struct nvme_ns *ns,
 	int res = SNTI_TRANSLATION_SUCCESS;
 	int nvme_sc;
 	struct nvme_command c;
-	struct nvme_queue *nvmeq;
 
 	memset(&c, 0, sizeof(c));
 	c.common.opcode = nvme_cmd_flush;
 	c.common.nsid = cpu_to_le32(ns->ns_id);
 
-	nvmeq = get_nvmeq(ns->dev);
-	put_nvmeq(nvmeq);
-	nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
+	nvme_sc = nvme_submit_io_cmd(ns->dev, &c, NULL);
 
 	res = nvme_trans_status_code(hdr, nvme_sc);
 	if (res)
@@ -2872,7 +2855,6 @@ static int nvme_trans_unmap(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 	struct nvme_dev *dev = ns->dev;
 	struct scsi_unmap_parm_list *plist;
 	struct nvme_dsm_range *range;
-	struct nvme_queue *nvmeq;
 	struct nvme_command c;
 	int i, nvme_sc, res = -ENOMEM;
 	u16 ndesc, list_len;
@@ -2914,10 +2896,7 @@ static int nvme_trans_unmap(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 	c.dsm.nr = cpu_to_le32(ndesc - 1);
 	c.dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
 
-	nvmeq = get_nvmeq(dev);
-	put_nvmeq(nvmeq);
-	nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
-
+	nvme_sc = nvme_submit_io_cmd(dev, &c, NULL);
 	res = nvme_trans_status_code(hdr, nvme_sc);
 
 	dma_free_coherent(&dev->pci_dev->dev, ndesc * sizeof(*range),