@@ -394,11 +394,11 @@ dma_addr_t map_descbuffer(struct b43legacy_dmaring *ring,
 	dma_addr_t dmaaddr;
 
 	if (tx)
-		dmaaddr = ssb_dma_map_single(ring->dev->dev,
+		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
					     buf, len,
					     DMA_TO_DEVICE);
 	else
-		dmaaddr = ssb_dma_map_single(ring->dev->dev,
+		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
					     buf, len,
					     DMA_FROM_DEVICE);
@@ -412,11 +412,11 @@ void unmap_descbuffer(struct b43legacy_dmaring *ring,
		       int tx)
 {
 	if (tx)
-		ssb_dma_unmap_single(ring->dev->dev,
+		dma_unmap_single(ring->dev->dev->dma_dev,
				     addr, len,
				     DMA_TO_DEVICE);
 	else
-		ssb_dma_unmap_single(ring->dev->dev,
+		dma_unmap_single(ring->dev->dev->dma_dev,
				     addr, len,
				     DMA_FROM_DEVICE);
 }
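
[Note: a minimal sketch of the generic streaming-DMA pattern the two hunks above switch to, assuming an arbitrary struct device pointer and a driver-owned buffer; the function and variable names here are illustrative and not taken from dma.c.]

#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int example_map_tx_buffer(struct device *dma_dev, void *buf, size_t len)
{
	dma_addr_t addr;

	/* Map a CPU buffer so the device can read it (TX direction). */
	addr = dma_map_single(dma_dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, addr))
		return -EIO;

	/* ... hand "addr" to the hardware and wait for completion ... */

	/* Unmap with the same device, address, length and direction. */
	dma_unmap_single(dma_dev, addr, len, DMA_TO_DEVICE);

	return 0;
}
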
@@ -428,8 +428,8 @@ void sync_descbuffer_for_cpu(struct b43legacy_dmaring *ring,
 {
 	B43legacy_WARN_ON(ring->tx);
-	ssb_dma_sync_single_for_cpu(ring->dev->dev,
-				    addr, len, DMA_FROM_DEVICE);
+	dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
+				addr, len, DMA_FROM_DEVICE);
 }
 
 static inline
@@ -439,8 +439,8 @@ void sync_descbuffer_for_device(struct b43legacy_dmaring *ring,
 {
 	B43legacy_WARN_ON(ring->tx);
-	ssb_dma_sync_single_for_device(ring->dev->dev,
-				       addr, len, DMA_FROM_DEVICE);
+	dma_sync_single_for_device(ring->dev->dev->dma_dev,
+				   addr, len, DMA_FROM_DEVICE);
 }
 
 static inline
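
[Note: a minimal sketch of the sync pair used above for a reusable RX mapping; the device pointer and helper name are illustrative assumptions.]

#include <linux/dma-mapping.h>

static void example_recycle_rx_buffer(struct device *dma_dev, dma_addr_t addr,
				      size_t len)
{
	/* Give the mapping to the CPU before reading the received frame. */
	dma_sync_single_for_cpu(dma_dev, addr, len, DMA_FROM_DEVICE);

	/* ... inspect or copy the data on the CPU ... */

	/* Hand the same mapping back to the device for the next frame. */
	dma_sync_single_for_device(dma_dev, addr, len, DMA_FROM_DEVICE);
}
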
@@ -460,10 +460,10 @@ void free_descriptor_buffer(struct b43legacy_dmaring *ring,
 static int alloc_ringmemory(struct b43legacy_dmaring *ring)
 {
 	/* GFP flags must match the flags in free_ringmemory()! */
-	ring->descbase = ssb_dma_alloc_consistent(ring->dev->dev,
-						  B43legacy_DMA_RINGMEMSIZE,
-						  &(ring->dmabase),
-						  GFP_KERNEL);
+	ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
+					    B43legacy_DMA_RINGMEMSIZE,
+					    &(ring->dmabase),
+					    GFP_KERNEL);
 	if (!ring->descbase) {
 		b43legacyerr(ring->dev->wl, "DMA ringmemory allocation "
			     "failed\n");
@@ -476,8 +476,8 @@ static int alloc_ringmemory(struct b43legacy_dmaring *ring)
 static void free_ringmemory(struct b43legacy_dmaring *ring)
 {
-	ssb_dma_free_consistent(ring->dev->dev, B43legacy_DMA_RINGMEMSIZE,
-				ring->descbase, ring->dmabase, GFP_KERNEL);
+	dma_free_coherent(ring->dev->dev->dma_dev, B43legacy_DMA_RINGMEMSIZE,
+			  ring->descbase, ring->dmabase);
 }
 
 /* Reset the RX DMA channel */
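
[Note: a minimal sketch of the coherent alloc/free pairing used above, with a hypothetical ring size; unlike ssb_dma_free_consistent(), dma_free_coherent() takes no GFP flags.]

#include <linux/dma-mapping.h>

#define EXAMPLE_RINGMEM_SIZE	4096	/* illustrative, not B43legacy_DMA_RINGMEMSIZE */

static void *example_alloc_ring(struct device *dma_dev, dma_addr_t *dmabase)
{
	/* Returns a CPU pointer and fills *dmabase with the bus address. */
	return dma_alloc_coherent(dma_dev, EXAMPLE_RINGMEM_SIZE,
				  dmabase, GFP_KERNEL);
}

static void example_free_ring(struct device *dma_dev, void *descbase,
			      dma_addr_t dmabase)
{
	dma_free_coherent(dma_dev, EXAMPLE_RINGMEM_SIZE, descbase, dmabase);
}
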
@@ -589,7 +589,7 @@ static bool b43legacy_dma_mapping_error(struct b43legacy_dmaring *ring,
					 size_t buffersize,
					 bool dma_to_device)
 {
-	if (unlikely(ssb_dma_mapping_error(ring->dev->dev, addr)))
+	if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr)))
 		return 1;
 
 	switch (ring->type) {
@@ -906,7 +906,7 @@ struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev,
			goto err_kfree_meta;
 
		/* test for ability to dma to txhdr_cache */
-		dma_test = ssb_dma_map_single(dev->dev, ring->txhdr_cache,
+		dma_test = dma_map_single(dev->dev->dma_dev, ring->txhdr_cache,
					      sizeof(struct b43legacy_txhdr_fw3),
					      DMA_TO_DEVICE);
@@ -920,7 +920,7 @@ struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev,
			if (!ring->txhdr_cache)
				goto err_kfree_meta;
 
-			dma_test = ssb_dma_map_single(dev->dev,
+			dma_test = dma_map_single(dev->dev->dma_dev,
					ring->txhdr_cache,
					sizeof(struct b43legacy_txhdr_fw3),
					DMA_TO_DEVICE);
@@ -930,9 +930,9 @@ struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev,
				goto err_kfree_txhdr_cache;
		}
 
-		ssb_dma_unmap_single(dev->dev, dma_test,
-				     sizeof(struct b43legacy_txhdr_fw3),
-				     DMA_TO_DEVICE);
+		dma_unmap_single(dev->dev->dma_dev, dma_test,
+				 sizeof(struct b43legacy_txhdr_fw3),
+				 DMA_TO_DEVICE);
	}
 
	ring->nr_slots = nr_slots;
@@ -1040,9 +1040,12 @@ static int b43legacy_dma_set_mask(struct b43legacy_wldev *dev, u64 mask)
	/* Try to set the DMA mask. If it fails, try falling back to a
	 * lower mask, as we can always also support a lower one. */
	while (1) {
-		err = ssb_dma_set_mask(dev->dev, mask);
-		if (!err)
-			break;
+		err = dma_set_mask(dev->dev->dma_dev, mask);
+		if (!err) {
+			err = dma_set_coherent_mask(dev->dev->dma_dev, mask);
+			if (!err)
+				break;
+		}
		if (mask == DMA_BIT_MASK(64)) {
			mask = DMA_BIT_MASK(32);
			fallback = 1;
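
[Note: the hunk above now has to set the streaming and coherent masks separately and fall back to a narrower mask on failure; below is a standalone sketch of that pattern, assuming only a generic struct device and a simple 64-to-32-bit fallback rather than the driver's full logic.]

#include <linux/dma-mapping.h>

static int example_set_dma_mask(struct device *dma_dev)
{
	u64 mask = DMA_BIT_MASK(64);
	int err;

	while (1) {
		/* Both masks must be accepted before the mask is usable. */
		err = dma_set_mask(dma_dev, mask);
		if (!err) {
			err = dma_set_coherent_mask(dma_dev, mask);
			if (!err)
				break;
		}
		if (mask == DMA_BIT_MASK(64)) {
			mask = DMA_BIT_MASK(32);
			continue;	/* retry with the narrower mask */
		}
		return err;		/* no supported mask found */
	}
	return 0;
}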