Diffstat (limited to 'target/linux/brcm-2.4/patches/003-bcm47xx_cache_fixes.patch')
-rw-r--r-- | target/linux/brcm-2.4/patches/003-bcm47xx_cache_fixes.patch | 498
1 file changed, 498 insertions, 0 deletions
diff --git a/target/linux/brcm-2.4/patches/003-bcm47xx_cache_fixes.patch b/target/linux/brcm-2.4/patches/003-bcm47xx_cache_fixes.patch new file mode 100644 index 0000000000..e971e7fdfd --- /dev/null +++ b/target/linux/brcm-2.4/patches/003-bcm47xx_cache_fixes.patch @@ -0,0 +1,498 @@ +diff -urN linux.old/arch/mips/kernel/entry.S linux.dev/arch/mips/kernel/entry.S +--- linux.old/arch/mips/kernel/entry.S 2005-07-05 16:46:49.000000000 +0200 ++++ linux.dev/arch/mips/kernel/entry.S 2005-07-06 11:23:55.000000000 +0200 +@@ -100,6 +100,10 @@ + * and R4400 SC and MC versions. + */ + NESTED(except_vec3_generic, 0, sp) ++#ifdef CONFIG_BCM4710 ++ nop ++ nop ++#endif + #if R5432_CP0_INTERRUPT_WAR + mfc0 k0, CP0_INDEX + #endif +diff -urN linux.old/arch/mips/mm/c-r4k.c linux.dev/arch/mips/mm/c-r4k.c +--- linux.old/arch/mips/mm/c-r4k.c 2005-07-05 16:46:49.000000000 +0200 ++++ linux.dev/arch/mips/mm/c-r4k.c 2005-07-06 11:23:55.000000000 +0200 +@@ -14,6 +14,12 @@ + #include <linux/mm.h> + #include <linux/bitops.h> + ++#ifdef CONFIG_BCM4710 ++#include "../bcm947xx/include/typedefs.h" ++#include "../bcm947xx/include/sbconfig.h" ++#include <asm/paccess.h> ++#endif ++ + #include <asm/bcache.h> + #include <asm/bootinfo.h> + #include <asm/cacheops.h> +@@ -40,6 +46,8 @@ + .bc_inv = (void *)no_sc_noop + }; + ++int bcm4710 = 0; ++EXPORT_SYMBOL(bcm4710); + struct bcache_ops *bcops = &no_sc_ops; + + #define cpu_is_r4600_v1_x() ((read_c0_prid() & 0xfffffff0) == 0x2010) +@@ -64,8 +72,10 @@ + static inline void r4k_blast_dcache_page_setup(void) + { + unsigned long dc_lsize = current_cpu_data.dcache.linesz; +- +- if (dc_lsize == 16) ++ ++ if (bcm4710) ++ r4k_blast_dcache_page = blast_dcache_page; ++ else if (dc_lsize == 16) + r4k_blast_dcache_page = blast_dcache16_page; + else if (dc_lsize == 32) + r4k_blast_dcache_page = r4k_blast_dcache_page_dc32; +@@ -77,7 +87,9 @@ + { + unsigned long dc_lsize = current_cpu_data.dcache.linesz; + +- if (dc_lsize == 16) ++ if (bcm4710) ++ r4k_blast_dcache_page_indexed = blast_dcache_page_indexed; ++ else if (dc_lsize == 16) + r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed; + else if (dc_lsize == 32) + r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed; +@@ -89,7 +101,9 @@ + { + unsigned long dc_lsize = current_cpu_data.dcache.linesz; + +- if (dc_lsize == 16) ++ if (bcm4710) ++ r4k_blast_dcache = blast_dcache; ++ else if (dc_lsize == 16) + r4k_blast_dcache = blast_dcache16; + else if (dc_lsize == 32) + r4k_blast_dcache = blast_dcache32; +@@ -266,6 +280,7 @@ + r4k_blast_dcache(); + r4k_blast_icache(); + ++ if (!bcm4710) + switch (current_cpu_data.cputype) { + case CPU_R4000SC: + case CPU_R4000MC: +@@ -304,10 +319,10 @@ + * Kludge alert. For obscure reasons R4000SC and R4400SC go nuts if we + * only flush the primary caches but R10000 and R12000 behave sane ... 
+ */ +- if (current_cpu_data.cputype == CPU_R4000SC || ++ if (!bcm4710 && (current_cpu_data.cputype == CPU_R4000SC || + current_cpu_data.cputype == CPU_R4000MC || + current_cpu_data.cputype == CPU_R4400SC || +- current_cpu_data.cputype == CPU_R4400MC) ++ current_cpu_data.cputype == CPU_R4400MC)) + r4k_blast_scache(); + } + +@@ -383,12 +398,15 @@ + unsigned long ic_lsize = current_cpu_data.icache.linesz; + unsigned long addr, aend; + ++ addr = start & ~(dc_lsize - 1); ++ aend = (end - 1) & ~(dc_lsize - 1); ++ + if (!cpu_has_ic_fills_f_dc) { + if (end - start > dcache_size) + r4k_blast_dcache(); + else { +- addr = start & ~(dc_lsize - 1); +- aend = (end - 1) & ~(dc_lsize - 1); ++ BCM4710_PROTECTED_FILL_TLB(addr); ++ BCM4710_PROTECTED_FILL_TLB(aend); + + while (1) { + /* Hit_Writeback_Inv_D */ +@@ -403,8 +421,6 @@ + if (end - start > icache_size) + r4k_blast_icache(); + else { +- addr = start & ~(ic_lsize - 1); +- aend = (end - 1) & ~(ic_lsize - 1); + while (1) { + /* Hit_Invalidate_I */ + protected_flush_icache_line(addr); +@@ -413,6 +429,9 @@ + addr += ic_lsize; + } + } ++ ++ if (bcm4710) ++ flush_cache_all(); + } + + /* +@@ -443,7 +462,8 @@ + if (cpu_has_subset_pcaches) { + unsigned long addr = (unsigned long) page_address(page); + +- r4k_blast_scache_page(addr); ++ if (!bcm4710) ++ r4k_blast_scache_page(addr); + ClearPageDcacheDirty(page); + + return; +@@ -451,6 +471,7 @@ + + if (!cpu_has_ic_fills_f_dc) { + unsigned long addr = (unsigned long) page_address(page); ++ + r4k_blast_dcache_page(addr); + ClearPageDcacheDirty(page); + } +@@ -477,7 +498,7 @@ + /* Catch bad driver code */ + BUG_ON(size == 0); + +- if (cpu_has_subset_pcaches) { ++ if (!bcm4710 && cpu_has_subset_pcaches) { + unsigned long sc_lsize = current_cpu_data.scache.linesz; + + if (size >= scache_size) { +@@ -509,6 +530,8 @@ + R4600_HIT_CACHEOP_WAR_IMPL; + a = addr & ~(dc_lsize - 1); + end = (addr + size - 1) & ~(dc_lsize - 1); ++ BCM4710_FILL_TLB(a); ++ BCM4710_FILL_TLB(end); + while (1) { + flush_dcache_line(a); /* Hit_Writeback_Inv_D */ + if (a == end) +@@ -527,7 +550,7 @@ + /* Catch bad driver code */ + BUG_ON(size == 0); + +- if (cpu_has_subset_pcaches) { ++ if (!bcm4710 && (cpu_has_subset_pcaches)) { + unsigned long sc_lsize = current_cpu_data.scache.linesz; + + if (size >= scache_size) { +@@ -554,6 +577,8 @@ + R4600_HIT_CACHEOP_WAR_IMPL; + a = addr & ~(dc_lsize - 1); + end = (addr + size - 1) & ~(dc_lsize - 1); ++ BCM4710_FILL_TLB(a); ++ BCM4710_FILL_TLB(end); + while (1) { + flush_dcache_line(a); /* Hit_Writeback_Inv_D */ + if (a == end) +@@ -577,6 +602,8 @@ + unsigned long dc_lsize = current_cpu_data.dcache.linesz; + + R4600_HIT_CACHEOP_WAR_IMPL; ++ BCM4710_PROTECTED_FILL_TLB(addr); ++ BCM4710_PROTECTED_FILL_TLB(addr + 4); + protected_writeback_dcache_line(addr & ~(dc_lsize - 1)); + protected_flush_icache_line(addr & ~(ic_lsize - 1)); + if (MIPS4K_ICACHE_REFILL_WAR) { +@@ -986,10 +1013,12 @@ + case CPU_R4000MC: + case CPU_R4400SC: + case CPU_R4400MC: +- probe_scache_kseg1 = (probe_func_t) (KSEG1ADDR(&probe_scache)); +- sc_present = probe_scache_kseg1(config); +- if (sc_present) +- c->options |= MIPS_CPU_CACHE_CDEX_S; ++ if (!bcm4710) { ++ probe_scache_kseg1 = (probe_func_t) (KSEG1ADDR(&probe_scache)); ++ sc_present = probe_scache_kseg1(config); ++ if (sc_present) ++ c->options |= MIPS_CPU_CACHE_CDEX_S; ++ } + break; + + case CPU_R10000: +@@ -1041,6 +1070,19 @@ + static inline void coherency_setup(void) + { + change_c0_config(CONF_CM_CMASK, CONF_CM_DEFAULT); ++ ++#if defined(CONFIG_BCM4310) || defined(CONFIG_BCM4704) 
|| defined(CONFIG_BCM5365) ++ if (BCM330X(current_cpu_data.processor_id)) { ++ uint32 cm; ++ ++ cm = read_c0_diag(); ++ /* Enable icache */ ++ cm |= (1 << 31); ++ /* Enable dcache */ ++ cm |= (1 << 30); ++ write_c0_diag(cm); ++ } ++#endif + + /* + * c0_status.cu=0 specifies that updates by the sc instruction use +@@ -1073,6 +1115,12 @@ + memcpy((void *)(KSEG0 + 0x100), &except_vec2_generic, 0x80); + memcpy((void *)(KSEG1 + 0x100), &except_vec2_generic, 0x80); + ++ if (current_cpu_data.cputype == CPU_BCM4710 && (current_cpu_data.processor_id & PRID_REV_MASK) == 0) { ++ printk("Enabling BCM4710A0 cache workarounds.\n"); ++ bcm4710 = 1; ++ } else ++ bcm4710 = 0; ++ + probe_pcache(); + setup_scache(); + +diff -urN linux.old/arch/mips/mm/tlbex-mips32.S linux.dev/arch/mips/mm/tlbex-mips32.S +--- linux.old/arch/mips/mm/tlbex-mips32.S 2005-07-05 16:46:49.000000000 +0200 ++++ linux.dev/arch/mips/mm/tlbex-mips32.S 2005-07-06 11:23:56.000000000 +0200 +@@ -90,6 +90,9 @@ + .set noat + LEAF(except_vec0_r4000) + .set mips3 ++#ifdef CONFIG_BCM4704 ++ nop ++#endif + #ifdef CONFIG_SMP + mfc0 k1, CP0_CONTEXT + la k0, pgd_current +diff -urN linux.old/include/asm-mips/r4kcache.h linux.dev/include/asm-mips/r4kcache.h +--- linux.old/include/asm-mips/r4kcache.h 2005-07-05 16:46:49.000000000 +0200 ++++ linux.dev/include/asm-mips/r4kcache.h 2005-07-06 12:52:57.000000000 +0200 +@@ -15,6 +15,18 @@ + #include <asm/asm.h> + #include <asm/cacheops.h> + ++#ifdef CONFIG_BCM4710 ++#define BCM4710_DUMMY_RREG() (((sbconfig_t *)(KSEG1ADDR(SB_ENUM_BASE + SBCONFIGOFF)))->sbimstate) ++ ++#define BCM4710_FILL_TLB(addr) (*(volatile unsigned long *)(addr)) ++#define BCM4710_PROTECTED_FILL_TLB(addr) ({ unsigned long x; get_dbe(x, (volatile unsigned long *)(addr)); }) ++#else ++#define BCM4710_DUMMY_RREG() ++ ++#define BCM4710_FILL_TLB(addr) ++#define BCM4710_PROTECTED_FILL_TLB(addr) ++#endif ++ + #define cache_op(op,addr) \ + __asm__ __volatile__( \ + " .set noreorder \n" \ +@@ -27,12 +39,25 @@ + + static inline void flush_icache_line_indexed(unsigned long addr) + { +- cache_op(Index_Invalidate_I, addr); ++ unsigned int way; ++ unsigned long ws_inc = 1UL << current_cpu_data.dcache.waybit; ++ ++ for (way = 0; way < current_cpu_data.dcache.ways; way++) { ++ cache_op(Index_Invalidate_I, addr); ++ addr += ws_inc; ++ } + } + + static inline void flush_dcache_line_indexed(unsigned long addr) + { +- cache_op(Index_Writeback_Inv_D, addr); ++ unsigned int way; ++ unsigned long ws_inc = 1UL << current_cpu_data.dcache.waybit; ++ ++ for (way = 0; way < current_cpu_data.dcache.ways; way++) { ++ BCM4710_DUMMY_RREG(); ++ cache_op(Index_Writeback_Inv_D, addr); ++ addr += ws_inc; ++ } + } + + static inline void flush_scache_line_indexed(unsigned long addr) +@@ -47,6 +72,7 @@ + + static inline void flush_dcache_line(unsigned long addr) + { ++ BCM4710_DUMMY_RREG(); + cache_op(Hit_Writeback_Inv_D, addr); + } + +@@ -91,6 +117,7 @@ + */ + static inline void protected_writeback_dcache_line(unsigned long addr) + { ++ BCM4710_DUMMY_RREG(); + __asm__ __volatile__( + ".set noreorder\n\t" + ".set mips3\n" +@@ -138,6 +165,62 @@ + : "r" (base), \ + "i" (op)); + ++#define cache_unroll(base,op) \ ++ __asm__ __volatile__(" \ ++ .set noreorder; \ ++ .set mips3; \ ++ cache %1, (%0); \ ++ .set mips0; \ ++ .set reorder" \ ++ : \ ++ : "r" (base), \ ++ "i" (op)); ++ ++ ++static inline void blast_dcache(void) ++{ ++ unsigned long start = KSEG0; ++ unsigned long dcache_size = current_cpu_data.dcache.waysize * current_cpu_data.dcache.ways; ++ unsigned long end = (start + 
dcache_size); ++ ++ while(start < end) { ++ BCM4710_DUMMY_RREG(); ++ cache_unroll(start,Index_Writeback_Inv_D); ++ start += current_cpu_data.dcache.linesz; ++ } ++} ++ ++static inline void blast_dcache_page(unsigned long page) ++{ ++ unsigned long start = page; ++ unsigned long end = start + PAGE_SIZE; ++ ++ BCM4710_FILL_TLB(start); ++ do { ++ BCM4710_DUMMY_RREG(); ++ cache_unroll(start,Hit_Writeback_Inv_D); ++ start += current_cpu_data.dcache.linesz; ++ } while (start < end); ++} ++ ++static inline void blast_dcache_page_indexed(unsigned long page) ++{ ++ unsigned long start = page; ++ unsigned long end = start + PAGE_SIZE; ++ unsigned long ws_inc = 1UL << current_cpu_data.dcache.waybit; ++ unsigned long ws_end = current_cpu_data.dcache.ways << ++ current_cpu_data.dcache.waybit; ++ unsigned long ws, addr; ++ ++ for (ws = 0; ws < ws_end; ws += ws_inc) { ++ start = page + ws; ++ for (addr = start; addr < end; addr += current_cpu_data.dcache.linesz) { ++ BCM4710_DUMMY_RREG(); ++ cache_unroll(addr,Index_Writeback_Inv_D); ++ } ++ } ++} ++ + static inline void blast_dcache16(void) + { + unsigned long start = KSEG0; +@@ -148,8 +231,9 @@ + unsigned long ws, addr; + + for (ws = 0; ws < ws_end; ws += ws_inc) +- for (addr = start; addr < end; addr += 0x200) ++ for (addr = start; addr < end; addr += 0x200) { + cache16_unroll32(addr|ws,Index_Writeback_Inv_D); ++ } + } + + static inline void blast_dcache16_page(unsigned long page) +@@ -173,8 +257,9 @@ + unsigned long ws, addr; + + for (ws = 0; ws < ws_end; ws += ws_inc) +- for (addr = start; addr < end; addr += 0x200) ++ for (addr = start; addr < end; addr += 0x200) { + cache16_unroll32(addr|ws,Index_Writeback_Inv_D); ++ } + } + + static inline void blast_icache16(void) +@@ -196,6 +281,7 @@ + unsigned long start = page; + unsigned long end = start + PAGE_SIZE; + ++ BCM4710_FILL_TLB(start); + do { + cache16_unroll32(start,Hit_Invalidate_I); + start += 0x200; +@@ -281,6 +367,7 @@ + : "r" (base), \ + "i" (op)); + ++ + static inline void blast_dcache32(void) + { + unsigned long start = KSEG0; +@@ -291,8 +378,9 @@ + unsigned long ws, addr; + + for (ws = 0; ws < ws_end; ws += ws_inc) +- for (addr = start; addr < end; addr += 0x400) ++ for (addr = start; addr < end; addr += 0x400) { + cache32_unroll32(addr|ws,Index_Writeback_Inv_D); ++ } + } + + static inline void blast_dcache32_page(unsigned long page) +@@ -316,8 +404,9 @@ + unsigned long ws, addr; + + for (ws = 0; ws < ws_end; ws += ws_inc) +- for (addr = start; addr < end; addr += 0x400) ++ for (addr = start; addr < end; addr += 0x400) { + cache32_unroll32(addr|ws,Index_Writeback_Inv_D); ++ } + } + + static inline void blast_icache32(void) +@@ -339,6 +428,7 @@ + unsigned long start = page; + unsigned long end = start + PAGE_SIZE; + ++ BCM4710_FILL_TLB(start); + do { + cache32_unroll32(start,Hit_Invalidate_I); + start += 0x400; +@@ -443,6 +533,7 @@ + unsigned long start = page; + unsigned long end = start + PAGE_SIZE; + ++ BCM4710_FILL_TLB(start); + do { + cache64_unroll32(start,Hit_Invalidate_I); + start += 0x800; +diff -urN linux.old/include/asm-mips/stackframe.h linux.dev/include/asm-mips/stackframe.h +--- linux.old/include/asm-mips/stackframe.h 2005-07-05 16:46:49.000000000 +0200 ++++ linux.dev/include/asm-mips/stackframe.h 2005-07-06 11:23:56.000000000 +0200 +@@ -209,6 +209,20 @@ + + #endif + ++#if defined(CONFIG_BCM4710) || defined(CONFIG_BCM4704) ++ ++#undef RESTORE_SP_AND_RET ++#define RESTORE_SP_AND_RET \ ++ lw sp, PT_R29(sp); \ ++ .set mips3; \ ++ nop; \ ++ nop; \ ++ eret; \ ++ .set mips0 ++ 
++#endif ++ ++ + #define RESTORE_SP \ + lw sp, PT_R29(sp); \ + +diff -urN linux.old/mm/memory.c linux.dev/mm/memory.c +--- linux.old/mm/memory.c 2005-04-04 03:42:20.000000000 +0200 ++++ linux.dev/mm/memory.c 2005-07-06 11:23:56.000000000 +0200 +@@ -925,6 +925,7 @@ + flush_page_to_ram(new_page); + flush_cache_page(vma, address); + establish_pte(vma, address, page_table, pte_mkwrite(pte_mkdirty(mk_pte(new_page, vma->vm_page_prot)))); ++ flush_icache_page(vma, new_page); + } + + /* |