@ -270,21 +270,20 @@ static inline void __dc_entire_op(const int cacheop)
 * Doesn't deal with type-of-op / IRQ-disabling / waiting-for-flush-to-complete.
 * Its sole purpose is to help gcc generate ZOL (zero-overhead loops).
*/
static inline void __dc_line_loop ( unsigned long start , unsigned long sz ,
int aux_reg )
static inline void __dc_line_loop ( unsigned long paddr , unsigned long sz ,
int aux_reg )
{
int num_lines , slack ;
int num_lines ;
/* Ensure we properly floor/ceil the non-line aligned/sized requests
* and have @ start - aligned to cache line and integral @ num_lines .
* and have @ paddr - aligned to cache line and integral @ num_lines .
* This however can be avoided for page sized since :
* - @ start will be cache - line aligned already ( being page aligned )
* - @ paddr will be cache - line aligned already ( being page aligned )
* - @ sz will be integral multiple of line size ( being page sized ) .
*/
if ( ! ( __builtin_constant_p ( sz ) & & sz = = PAGE_SIZE ) ) {
slack = start & ~ DCACHE_LINE_MASK ;
sz + = slack ;
start - = slack ;
sz + = paddr & ~ DCACHE_LINE_MASK ;
paddr & = DCACHE_LINE_MASK ;
}
num_lines = DIV_ROUND_UP ( sz , ARC_DCACHE_LINE_LEN ) ;
@ -298,17 +297,17 @@ static inline void __dc_line_loop(unsigned long start, unsigned long sz,
* doesn ' t support aliasing configs for D $ , yet .
* Thus paddr is enough to provide both tag and index .
*/
write_aux_reg ( ARC_REG_DC_PTAG , start ) ;
write_aux_reg ( ARC_REG_DC_PTAG , paddr ) ;
# endif
write_aux_reg ( aux_reg , start ) ;
start + = ARC_DCACHE_LINE_LEN ;
write_aux_reg ( aux_reg , paddr ) ;
paddr + = ARC_DCACHE_LINE_LEN ;
}
}
/*
* D - Cache : Per Line INV ( discard or wback + discard ) or FLUSH ( wback )
*/
static inline void __dc_line_op ( unsigned long start , unsigned long sz ,
static inline void __dc_line_op ( unsigned long paddr , unsigned long sz ,
const int cacheop )
{
unsigned long flags , tmp = tmp ;
@ -332,7 +331,7 @@ static inline void __dc_line_op(unsigned long start, unsigned long sz,
else
aux = ARC_REG_DC_FLDL ;
__dc_line_loop ( start , sz , aux ) ;
__dc_line_loop ( paddr , sz , aux ) ;
if ( cacheop & OP_FLUSH ) /* flush / flush-n-inv both wait */
wait_for_flush ( ) ;
@ -347,7 +346,7 @@ static inline void __dc_line_op(unsigned long start, unsigned long sz,
#else

/* No D-cache configured: cache maintenance ops compile away to nothing */
#define __dc_entire_op(cacheop)
#define __dc_line_op(paddr, sz, cacheop)

#endif /* CONFIG_ARC_HAS_DCACHE */
@ -399,49 +398,45 @@ static inline void __dc_line_op(unsigned long start, unsigned long sz,
/***********************************************************
* Machine specific helper for per line I - Cache invalidate .
*/
static void __ic_line_inv_vaddr ( unsigned long phy_st art , unsigned long vaddr ,
static void __ic_line_inv_vaddr ( unsigned long padd r , unsigned long vaddr ,
unsigned long sz )
{
unsigned long flags ;
int num_lines , slack ;
unsigned int addr ;
int num_lines ;
/*
* Ensure we properly floor / ceil the non - line aligned / sized requests :
* However page sized flushes can be compile time optimised .
* - @ phy_st art will be cache - line aligned already ( being page aligned )
* - @ padd r will be cache - line aligned already ( being page aligned )
* - @ sz will be integral multiple of line size ( being page sized ) .
*/
if ( ! ( __builtin_constant_p ( sz ) & & sz = = PAGE_SIZE ) ) {
slack = phy_start & ~ ICACHE_LINE_MASK ;
sz + = slack ;
phy_start - = slack ;
sz + = paddr & ~ ICACHE_LINE_MASK ;
paddr & = ICACHE_LINE_MASK ;
vaddr & = ICACHE_LINE_MASK ;
}
num_lines = DIV_ROUND_UP ( sz , ARC_ICACHE_LINE_LEN ) ;
# if (CONFIG_ARC_MMU_VER > 2)
vaddr & = ICACHE_LINE_MASK ;
addr = phy_start ;
# else
# if (CONFIG_ARC_MMU_VER <= 2)
/* bits 17:13 of vaddr go as bits 4:0 of paddr */
addr = phy_start | ( ( vaddr > > 13 ) & 0x1F ) ;
paddr | = ( vaddr > > PAGE_SHIFT ) & 0x1F ;
# endif
local_irq_save ( flags ) ;
while ( num_lines - - > 0 ) {
# if (CONFIG_ARC_MMU_VER > 2)
/* tag comes from phy addr */
write_aux_reg ( ARC_REG_IC_PTAG , addr ) ;
write_aux_reg ( ARC_REG_IC_PTAG , p addr) ;
/* index bits come from vaddr */
write_aux_reg ( ARC_REG_IC_IVIL , vaddr ) ;
vaddr + = ARC_ICACHE_LINE_LEN ;
# else
/* paddr contains stuffed vaddrs bits */
write_aux_reg ( ARC_REG_IC_IVIL , addr ) ;
write_aux_reg ( ARC_REG_IC_IVIL , p addr) ;
# endif
addr + = ARC_ICACHE_LINE_LEN ;
p addr + = ARC_ICACHE_LINE_LEN ;
}
local_irq_restore ( flags ) ;
}