@@ -13,23 +13,6 @@
 #include <linux/export.h>
 #include <linux/bug.h>
 
-/*
- * Generic direct DMA implementation
- *
- * This implementation supports a per-device offset that can be applied if
- * the address at which memory is visible to devices is not 0. Platform code
- * can set archdata.dma_data to an unsigned long holding the offset. By
- * default the offset is PCI_DRAM_OFFSET.
- */
-static unsigned long get_dma_direct_offset(struct device *dev)
-{
-	if (likely(dev))
-		return (unsigned long)dev->archdata.dma_data;
-
-	return PCI_DRAM_OFFSET; /* FIXME Not sure if is correct */
-}
-
 #define NOT_COHERENT_CACHE
 
 static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
@@ -51,7 +34,7 @@ static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
 		return NULL;
 	ret = page_address(page);
 	memset(ret, 0, size);
-	*dma_handle = virt_to_phys(ret) + get_dma_direct_offset(dev);
+	*dma_handle = virt_to_phys(ret);
 
 	return ret;
 #endif
@@ -77,7 +60,7 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
 
 	/* FIXME this part of code is untested */
 	for_each_sg(sgl, sg, nents, i) {
-		sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev);
+		sg->dma_address = sg_phys(sg);
 		__dma_sync(page_to_phys(sg_page(sg)) + sg->offset,
 							sg->length, direction);
 	}
@@ -104,7 +87,7 @@ static inline dma_addr_t dma_direct_map_page(struct device *dev,
 					     struct dma_attrs *attrs)
 {
 	__dma_sync(page_to_phys(page) + offset, size, direction);
-	return page_to_phys(page) + offset + get_dma_direct_offset(dev);
+	return page_to_phys(page) + offset;
 }
 
 static inline void dma_direct_unmap_page(struct device *dev,