target/linux/generic/patches-3.10/002-Revert-MIPS-Allow-ASID-size-to-be-determined-at-boot.patch
From 48c4ac976ae995f263cde8f09578de86bc8e9f1d Mon Sep 17 00:00:00 2001
From: David Daney <david.daney@cavium.com>
Date: Mon, 13 May 2013 13:56:44 -0700
Subject: [PATCH 2/3] Revert "MIPS: Allow ASID size to be determined at boot
 time."

This reverts commit d532f3d26716a39dfd4b88d687bd344fbe77e390.

The original commit has several problems:

1) Doesn't work with 64-bit kernels.

2) Calls TLBMISS_HANDLER_SETUP() before the code is generated.

3) Calls TLBMISS_HANDLER_SETUP() twice in per_cpu_trap_init() when
   only one call is needed.

[ralf@linux-mips.org: Also revert the bits of the ASID patch which were
hidden in the KVM merge.]

Signed-off-by: David Daney <david.daney@cavium.com>
Cc: linux-mips@linux-mips.org
Cc: linux-kernel@vger.kernel.org
Cc: "Steven J. Hill" <Steven.Hill@imgtec.com>
Cc: David Daney <david.daney@cavium.com>
Patchwork: https://patchwork.linux-mips.org/patch/5242/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
---
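For reference: the compile-time scheme being restored keeps the hardware
ASID in the low bits (asid & ASID_MASK) and treats all unused upper bits
as a software version counter. Below is a minimal user-space sketch of
that allocation logic, using the default R4000-style constants from the
mmu_context.h hunk further down; get_new_asid() and the stubbed
local_flush_tlb_all() are illustrative only, not kernel code.

#include <stdio.h>

#define ASID_INC  0x1
#define ASID_MASK 0xff
/* All bits above ASID_MASK count "versions" of the ASID space. */
#define ASID_VERSION_MASK  ((unsigned long)~(ASID_MASK | (ASID_MASK - 1)))
#define ASID_FIRST_VERSION ((unsigned long)(~ASID_VERSION_MASK) + 1)

static unsigned long asid_cache = ASID_FIRST_VERSION;

static void local_flush_tlb_all(void) { /* stub for illustration */ }

/* Mirrors the restored get_new_mmu_context(): bump the ASID; when the
 * hardware bits wrap to zero, flush the TLB and start a new version. */
static unsigned long get_new_asid(void)
{
        unsigned long asid = asid_cache;

        if (!((asid += ASID_INC) & ASID_MASK)) {
                local_flush_tlb_all();  /* start new asid cycle */
                if (!asid)              /* fix version if needed */
                        asid = ASID_FIRST_VERSION;
        }
        return asid_cache = asid;
}

int main(void)
{
        for (int i = 0; i < 300; i++) {
                unsigned long a = get_new_asid();
                if (i < 2 || (a & ASID_MASK) < 2)
                        printf("asid=%#lx hw=%#lx version=%#lx\n",
                               a, a & ASID_MASK, a & ASID_VERSION_MASK);
        }
        return 0;
}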
 arch/mips/include/asm/kvm_host.h    |    2 +-
 arch/mips/include/asm/mmu_context.h |   95 +++++++++++++----------------------
 arch/mips/kernel/genex.S            |    2 +-
 arch/mips/kernel/smtc.c             |   10 ++--
 arch/mips/kernel/traps.c            |    6 +--
 arch/mips/kvm/kvm_mips_emul.c       |   29 ++++++-----
 arch/mips/kvm/kvm_tlb.c             |   26 ++++++----
 arch/mips/lib/dump_tlb.c            |    5 +-
 arch/mips/lib/r3k_dump_tlb.c        |    7 ++-
 arch/mips/mm/tlb-r3k.c              |   20 ++++----
 arch/mips/mm/tlb-r4k.c              |    2 +-
 arch/mips/mm/tlb-r8k.c              |    2 +-
 arch/mips/mm/tlbex.c                |   49 ------------------
 13 files changed, 93 insertions(+), 162 deletions(-)

--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -336,7 +336,7 @@ enum emulation_result {
 #define VPN2_MASK           0xffffe000
 #define TLB_IS_GLOBAL(x)    (((x).tlb_lo0 & MIPS3_PG_G) && ((x).tlb_lo1 & MIPS3_PG_G))
 #define TLB_VPN2(x)         ((x).tlb_hi & VPN2_MASK)
-#define TLB_ASID(x)         (ASID_MASK((x).tlb_hi))
+#define TLB_ASID(x)         ((x).tlb_hi & ASID_MASK)
 #define TLB_IS_VALID(x, va) (((va) & (1 << PAGE_SHIFT)) ? ((x).tlb_lo1 & MIPS3_PG_V) : ((x).tlb_lo0 & MIPS3_PG_V))
 
 struct kvm_mips_tlb {
--- a/arch/mips/include/asm/mmu_context.h
+++ b/arch/mips/include/asm/mmu_context.h
@@ -67,68 +67,45 @@ extern unsigned long pgd_current[];
        TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir)
 #endif
 #endif /* CONFIG_MIPS_PGD_C0_CONTEXT*/
+#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
 
-#define ASID_INC(asid)                                         \
-({                                                             \
-       unsigned long __asid = asid;                            \
-       __asm__("1:\taddiu\t%0,1\t\t\t\t# patched\n\t"          \
-       ".section\t__asid_inc,\"a\"\n\t"                        \
-       ".word\t1b\n\t"                                         \
-       ".previous"                                             \
-       :"=r" (__asid)                                          \
-       :"0" (__asid));                                         \
-       __asid;                                                 \
-})
-#define ASID_MASK(asid)                                                \
-({                                                             \
-       unsigned long __asid = asid;                            \
-       __asm__("1:\tandi\t%0,%1,0xfc0\t\t\t# patched\n\t"      \
-       ".section\t__asid_mask,\"a\"\n\t"                       \
-       ".word\t1b\n\t"                                         \
-       ".previous"                                             \
-       :"=r" (__asid)                                          \
-       :"r" (__asid));                                         \
-       __asid;                                                 \
-})
-#define ASID_VERSION_MASK                                      \
-({                                                             \
-       unsigned long __asid;                                   \
-       __asm__("1:\taddiu\t%0,$0,0xff00\t\t\t\t# patched\n\t"  \
-       ".section\t__asid_version_mask,\"a\"\n\t"               \
-       ".word\t1b\n\t"                                         \
-       ".previous"                                             \
-       :"=r" (__asid));                                        \
-       __asid;                                                 \
-})
-#define ASID_FIRST_VERSION                                     \
-({                                                             \
-       unsigned long __asid = asid;                            \
-       __asm__("1:\tli\t%0,0x100\t\t\t\t# patched\n\t"         \
-       ".section\t__asid_first_version,\"a\"\n\t"              \
-       ".word\t1b\n\t"                                         \
-       ".previous"                                             \
-       :"=r" (__asid));                                        \
-       __asid;                                                 \
-})
-
-#define ASID_FIRST_VERSION_R3000       0x1000
-#define ASID_FIRST_VERSION_R4000       0x100
-#define ASID_FIRST_VERSION_R8000       0x1000
-#define ASID_FIRST_VERSION_RM9000      0x1000
+#define ASID_INC       0x40
+#define ASID_MASK      0xfc0
+
+#elif defined(CONFIG_CPU_R8000)
+
+#define ASID_INC       0x10
+#define ASID_MASK      0xff0
+
+#elif defined(CONFIG_MIPS_MT_SMTC)
+
+#define ASID_INC       0x1
+extern unsigned long smtc_asid_mask;
+#define ASID_MASK      (smtc_asid_mask)
+#define HW_ASID_MASK   0xff
+/* End SMTC/34K debug hack */
+#else /* FIXME: not correct for R6000 */
+
+#define ASID_INC       0x1
+#define ASID_MASK      0xff
 
-#ifdef CONFIG_MIPS_MT_SMTC
-#define SMTC_HW_ASID_MASK              0xff
-extern unsigned int smtc_asid_mask;
 #endif
 
 #define cpu_context(cpu, mm)   ((mm)->context.asid[cpu])
-#define cpu_asid(cpu, mm)      ASID_MASK(cpu_context((cpu), (mm)))
+#define cpu_asid(cpu, mm)      (cpu_context((cpu), (mm)) & ASID_MASK)
 #define asid_cache(cpu)                (cpu_data[cpu].asid_cache)
 
 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
 }
 
+/*
+ *  All unused by hardware upper bits will be considered
+ *  as a software asid extension.
+ */
+#define ASID_VERSION_MASK  ((unsigned long)~(ASID_MASK|(ASID_MASK-1)))
+#define ASID_FIRST_VERSION ((unsigned long)(~ASID_VERSION_MASK) + 1)
+
 #ifndef CONFIG_MIPS_MT_SMTC
 /* Normal, classic MIPS get_new_mmu_context */
 static inline void
@@ -137,7 +114,7 @@ get_new_mmu_context(struct mm_struct *mm
        extern void kvm_local_flush_tlb_all(void);
        unsigned long asid = asid_cache(cpu);
 
-       if (!ASID_MASK((asid = ASID_INC(asid)))) {
+       if (! ((asid += ASID_INC) & ASID_MASK) ) {
                if (cpu_has_vtag_icache)
                        flush_icache_all();
 #ifdef CONFIG_VIRTUALIZATION
@@ -200,7 +177,7 @@ static inline void switch_mm(struct mm_s
         * free up the ASID value for use and flush any old
         * instances of it from the TLB.
         */
-       oldasid = ASID_MASK(read_c0_entryhi());
+       oldasid = (read_c0_entryhi() & ASID_MASK);
        if(smtc_live_asid[mytlb][oldasid]) {
                smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
                if(smtc_live_asid[mytlb][oldasid] == 0)
@@ -211,7 +188,7 @@ static inline void switch_mm(struct mm_s
         * having ASID_MASK smaller than the hardware maximum,
         * make sure no "soft" bits become "hard"...
         */
-       write_c0_entryhi((read_c0_entryhi() & ~SMTC_HW_ASID_MASK) |
+       write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) |
                         cpu_asid(cpu, next));
        ehb(); /* Make sure it propagates to TCStatus */
        evpe(mtflags);
@@ -264,15 +241,15 @@ activate_mm(struct mm_struct *prev, stru
 #ifdef CONFIG_MIPS_MT_SMTC
        /* See comments for similar code above */
        mtflags = dvpe();
-       oldasid = ASID_MASK(read_c0_entryhi());
+       oldasid = read_c0_entryhi() & ASID_MASK;
        if(smtc_live_asid[mytlb][oldasid]) {
                smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
                if(smtc_live_asid[mytlb][oldasid] == 0)
                         smtc_flush_tlb_asid(oldasid);
        }
        /* See comments for similar code above */
-       write_c0_entryhi((read_c0_entryhi() & ~SMTC_HW_ASID_MASK) |
-                        cpu_asid(cpu, next));
+       write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) |
+                        cpu_asid(cpu, next));
        ehb(); /* Make sure it propagates to TCStatus */
        evpe(mtflags);
 #else
@@ -309,14 +286,14 @@ drop_mmu_context(struct mm_struct *mm, u
 #ifdef CONFIG_MIPS_MT_SMTC
                /* See comments for similar code above */
                prevvpe = dvpe();
-               oldasid = ASID_MASK(read_c0_entryhi());
+               oldasid = (read_c0_entryhi() & ASID_MASK);
                if (smtc_live_asid[mytlb][oldasid]) {
                        smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
                        if(smtc_live_asid[mytlb][oldasid] == 0)
                                smtc_flush_tlb_asid(oldasid);
                }
                /* See comments for similar code above */
-               write_c0_entryhi((read_c0_entryhi() & ~SMTC_HW_ASID_MASK)
+               write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK)
                                | cpu_asid(cpu, mm));
                ehb(); /* Make sure it propagates to TCStatus */
                evpe(prevvpe);
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -493,7 +493,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
        .set    noreorder
        /* check if TLB contains a entry for EPC */
        MFC0    k1, CP0_ENTRYHI
-       andi    k1, 0xff        /* ASID_MASK patched at run-time!! */
+       andi    k1, 0xff        /* ASID_MASK */
        MFC0    k0, CP0_EPC
        PTR_SRL k0, _PAGE_SHIFT + 1
        PTR_SLL k0, _PAGE_SHIFT + 1
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -111,7 +111,7 @@ static int vpe0limit;
 static int ipibuffers;
 static int nostlb;
 static int asidmask;
-unsigned int smtc_asid_mask = 0xff;
+unsigned long smtc_asid_mask = 0xff;
 
 static int __init vpe0tcs(char *str)
 {
@@ -1395,7 +1395,7 @@ void smtc_get_new_mmu_context(struct mm_
        asid = asid_cache(cpu);
 
        do {
-               if (!ASID_MASK(ASID_INC(asid))) {
+               if (!((asid += ASID_INC) & ASID_MASK) ) {
                        if (cpu_has_vtag_icache)
                                flush_icache_all();
                        /* Traverse all online CPUs (hack requires contiguous range) */
@@ -1414,7 +1414,7 @@ void smtc_get_new_mmu_context(struct mm_
                                                mips_ihb();
                                        }
                                        tcstat = read_tc_c0_tcstatus();
-                                       smtc_live_asid[tlb][ASID_MASK(tcstat)] |= (asiduse)(0x1 << i);
+                                       smtc_live_asid[tlb][(tcstat & ASID_MASK)] |= (asiduse)(0x1 << i);
                                        if (!prevhalt)
                                                write_tc_c0_tchalt(0);
                                }
@@ -1423,7 +1423,7 @@ void smtc_get_new_mmu_context(struct mm_
                                asid = ASID_FIRST_VERSION;
                        local_flush_tlb_all();  /* start new asid cycle */
                }
-       } while (smtc_live_asid[tlb][ASID_MASK(asid)]);
+       } while (smtc_live_asid[tlb][(asid & ASID_MASK)]);
 
        /*
        * SMTC shares the TLB within VPEs and possibly across all VPEs.
@@ -1461,7 +1461,7 @@ void smtc_flush_tlb_asid(unsigned long a
                tlb_read();
                ehb();
                ehi = read_c0_entryhi();
-               if (ASID_MASK(ehi) == asid) {
+               if ((ehi & ASID_MASK) == asid) {
                    /*
                     * Invalidate only entries with specified ASID,
                     * makiing sure all entries differ.
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -1656,7 +1656,6 @@ void __cpuinit per_cpu_trap_init(bool is
        unsigned int cpu = smp_processor_id();
        unsigned int status_set = ST0_CU0;
        unsigned int hwrena = cpu_hwrena_impl_bits;
-       unsigned long asid = 0;
 #ifdef CONFIG_MIPS_MT_SMTC
        int secondaryTC = 0;
        int bootTC = (cpu == 0);
@@ -1740,9 +1739,8 @@ void __cpuinit per_cpu_trap_init(bool is
        }
 #endif /* CONFIG_MIPS_MT_SMTC */
 
-       asid = ASID_FIRST_VERSION;
-       cpu_data[cpu].asid_cache = asid;
-       TLBMISS_HANDLER_SETUP();
+       if (!cpu_data[cpu].asid_cache)
+               cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
 
        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;
--- a/arch/mips/kvm/kvm_mips_emul.c
+++ b/arch/mips/kvm/kvm_mips_emul.c
@@ -525,16 +525,18 @@ kvm_mips_emulate_CP0(uint32_t inst, uint
                                printk("MTCz, cop0->reg[EBASE]: %#lx\n",
                                       kvm_read_c0_guest_ebase(cop0));
                        } else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
-                               uint32_t nasid = ASID_MASK(vcpu->arch.gprs[rt]);
+                               uint32_t nasid =
+                                   vcpu->arch.gprs[rt] & ASID_MASK;
                                if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0)
                                    &&
-                                   (ASID_MASK(kvm_read_c0_guest_entryhi(cop0))
-                                     != nasid)) {
+                                   ((kvm_read_c0_guest_entryhi(cop0) &
+                                     ASID_MASK) != nasid)) {
 
                                        kvm_debug
                                            ("MTCz, change ASID from %#lx to %#lx\n",
-                                            ASID_MASK(kvm_read_c0_guest_entryhi(cop0)),
-                                            ASID_MASK(vcpu->arch.gprs[rt]));
+                                            kvm_read_c0_guest_entryhi(cop0) &
+                                            ASID_MASK,
+                                            vcpu->arch.gprs[rt] & ASID_MASK);
 
                                        /* Blow away the shadow host TLBs */
                                        kvm_mips_flush_host_tlb(1);
@@ -986,7 +988,8 @@ kvm_mips_emulate_cache(uint32_t inst, ui
                 * resulting handler will do the right thing
                 */
                index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
-                                                 ASID_MASK(kvm_read_c0_guest_entryhi(cop0)));
+                                                 (kvm_read_c0_guest_entryhi
+                                                  (cop0) & ASID_MASK));
 
                if (index < 0) {
                        vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK);
@@ -1151,7 +1154,7 @@ kvm_mips_emulate_tlbmiss_ld(unsigned lon
        struct kvm_vcpu_arch *arch = &vcpu->arch;
        enum emulation_result er = EMULATE_DONE;
        unsigned long entryhi = (vcpu->arch.  host_cp0_badvaddr & VPN2_MASK) |
-                               ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
+                               (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
 
        if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
                /* save old pc */
@@ -1198,7 +1201,7 @@ kvm_mips_emulate_tlbinv_ld(unsigned long
        enum emulation_result er = EMULATE_DONE;
        unsigned long entryhi =
                (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
-               ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
+               (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
 
        if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
                /* save old pc */
@@ -1243,7 +1246,7 @@ kvm_mips_emulate_tlbmiss_st(unsigned lon
        struct kvm_vcpu_arch *arch = &vcpu->arch;
        enum emulation_result er = EMULATE_DONE;
        unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
-                               ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
+                               (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
 
        if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
                /* save old pc */
@@ -1287,7 +1290,7 @@ kvm_mips_emulate_tlbinv_st(unsigned long
        struct kvm_vcpu_arch *arch = &vcpu->arch;
        enum emulation_result er = EMULATE_DONE;
        unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
-               ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
+               (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
 
        if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
                /* save old pc */
@@ -1356,7 +1359,7 @@ kvm_mips_emulate_tlbmod(unsigned long ca
 {
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
-                               ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
+                               (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
        struct kvm_vcpu_arch *arch = &vcpu->arch;
        enum emulation_result er = EMULATE_DONE;
 
@@ -1783,8 +1786,8 @@ kvm_mips_handle_tlbmiss(unsigned long ca
         */
        index = kvm_mips_guest_tlb_lookup(vcpu,
                                          (va & VPN2_MASK) |
-                                         ASID_MASK(kvm_read_c0_guest_entryhi
-                                          (vcpu->arch.cop0)));
+                                         (kvm_read_c0_guest_entryhi
+                                          (vcpu->arch.cop0) & ASID_MASK));
        if (index < 0) {
                if (exccode == T_TLB_LD_MISS) {
                        er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
--- a/arch/mips/kvm/kvm_tlb.c
+++ b/arch/mips/kvm/kvm_tlb.c
@@ -51,13 +51,13 @@ EXPORT_SYMBOL(kvm_mips_is_error_pfn);
 
 uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
 {
-       return ASID_MASK(vcpu->arch.guest_kernel_asid[smp_processor_id()]);
+       return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK;
 }
 
 
 uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
 {
-       return ASID_MASK(vcpu->arch.guest_user_asid[smp_processor_id()]);
+       return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK;
 }
 
 inline uint32_t kvm_mips_get_commpage_asid (struct kvm_vcpu *vcpu)
@@ -84,7 +84,7 @@ void kvm_mips_dump_host_tlbs(void)
        old_pagemask = read_c0_pagemask();
 
        printk("HOST TLBs:\n");
-       printk("ASID: %#lx\n", ASID_MASK(read_c0_entryhi()));
+       printk("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK);
 
        for (i = 0; i < current_cpu_data.tlbsize; i++) {
                write_c0_index(i);
@@ -428,7 +428,7 @@ int kvm_mips_guest_tlb_lookup(struct kvm
 
        for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
                if (((TLB_VPN2(tlb[i]) & ~tlb[i].tlb_mask) == ((entryhi & VPN2_MASK) & ~tlb[i].tlb_mask)) &&
-                       (TLB_IS_GLOBAL(tlb[i]) || (TLB_ASID(tlb[i]) == ASID_MASK(entryhi)))) {
+                       (TLB_IS_GLOBAL(tlb[i]) || (TLB_ASID(tlb[i]) == (entryhi & ASID_MASK)))) {
                        index = i;
                        break;
                }
@@ -626,7 +626,7 @@ kvm_get_new_mmu_context(struct mm_struct
 {
        unsigned long asid = asid_cache(cpu);
 
-       if (!(ASID_MASK(ASID_INC(asid)))) {
+       if (!((asid += ASID_INC) & ASID_MASK)) {
                if (cpu_has_vtag_icache) {
                        flush_icache_all();
                }
@@ -804,7 +804,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu
        if (!newasid) {
                /* If we preempted while the guest was executing, then reload the pre-empted ASID */
                if (current->flags & PF_VCPU) {
-                       write_c0_entryhi(ASID_MASK(vcpu->arch.preempt_entryhi));
+                       write_c0_entryhi(vcpu->arch.
+                                        preempt_entryhi & ASID_MASK);
                        ehb();
                }
        } else {
@@ -816,11 +817,13 @@ void kvm_arch_vcpu_load(struct kvm_vcpu
                 */
                if (current->flags & PF_VCPU) {
                        if (KVM_GUEST_KERNEL_MODE(vcpu))
-                               write_c0_entryhi(ASID_MASK(vcpu->arch.
-                                                guest_kernel_asid[cpu]));
+                               write_c0_entryhi(vcpu->arch.
+                                                guest_kernel_asid[cpu] &
+                                                ASID_MASK);
                        else
-                               write_c0_entryhi(ASID_MASK(vcpu->arch.
-                                                guest_user_asid[cpu]));
+                               write_c0_entryhi(vcpu->arch.
+                                                guest_user_asid[cpu] &
+                                                ASID_MASK);
                        ehb();
                }
        }
@@ -879,7 +882,8 @@ uint32_t kvm_get_inst(uint32_t *opc, str
                            kvm_mips_guest_tlb_lookup(vcpu,
                                                      ((unsigned long) opc & VPN2_MASK)
                                                      |
-                                                     ASID_MASK(kvm_read_c0_guest_entryhi(cop0)));
+                                                     (kvm_read_c0_guest_entryhi
+                                                      (cop0) & ASID_MASK));
                        if (index < 0) {
                                kvm_err
                                    ("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
--- a/arch/mips/lib/dump_tlb.c
+++ b/arch/mips/lib/dump_tlb.c
@@ -11,7 +11,6 @@
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/tlbdebug.h>
-#include <asm/mmu_context.h>
 
 static inline const char *msk2str(unsigned int mask)
 {
@@ -56,7 +55,7 @@ static void dump_tlb(int first, int last
        s_pagemask = read_c0_pagemask();
        s_entryhi = read_c0_entryhi();
        s_index = read_c0_index();
-       asid = ASID_MASK(s_entryhi);
+       asid = s_entryhi & 0xff;
 
        for (i = first; i <= last; i++) {
                write_c0_index(i);
@@ -86,7 +85,7 @@ static void dump_tlb(int first, int last
 
                        printk("va=%0*lx asid=%02lx\n",
                               width, (entryhi & ~0x1fffUL),
-                              ASID_MASK(entryhi));
+                              entryhi & 0xff);
                        printk("\t[pa=%0*llx c=%d d=%d v=%d g=%d] ",
                               width,
                               (entrylo0 << 6) & PAGE_MASK, c0,
--- a/arch/mips/lib/r3k_dump_tlb.c
+++ b/arch/mips/lib/r3k_dump_tlb.c
@@ -9,7 +9,6 @@
 #include <linux/mm.h>
 
 #include <asm/mipsregs.h>
-#include <asm/mmu_context.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/tlbdebug.h>
@@ -22,7 +21,7 @@ static void dump_tlb(int first, int last
        unsigned int asid;
        unsigned long entryhi, entrylo0;
 
-       asid = ASID_MASK(read_c0_entryhi());
+       asid = read_c0_entryhi() & 0xfc0;
 
        for (i = first; i <= last; i++) {
                write_c0_index(i<<8);
@@ -36,7 +35,7 @@ static void dump_tlb(int first, int last
 
                /* Unused entries have a virtual address of KSEG0.  */
                if ((entryhi & 0xffffe000) != 0x80000000
-                   && (ASID_MASK(entryhi) == asid)) {
+                   && (entryhi & 0xfc0) == asid) {
                        /*
                         * Only print entries in use
                         */
@@ -45,7 +44,7 @@ static void dump_tlb(int first, int last
                        printk("va=%08lx asid=%08lx"
                               "  [pa=%06lx n=%d d=%d v=%d g=%d]",
                               (entryhi & 0xffffe000),
-                              ASID_MASK(entryhi),
+                              entryhi & 0xfc0,
                               entrylo0 & PAGE_MASK,
                               (entrylo0 & (1 << 11)) ? 1 : 0,
                               (entrylo0 & (1 << 10)) ? 1 : 0,
--- a/arch/mips/mm/tlb-r3k.c
+++ b/arch/mips/mm/tlb-r3k.c
@@ -51,7 +51,7 @@ void local_flush_tlb_all(void)
 #endif
 
        local_irq_save(flags);
-       old_ctx = ASID_MASK(read_c0_entryhi());
+       old_ctx = read_c0_entryhi() & ASID_MASK;
        write_c0_entrylo0(0);
        entry = r3k_have_wired_reg ? read_c0_wired() : 8;
        for (; entry < current_cpu_data.tlbsize; entry++) {
@@ -87,13 +87,13 @@ void local_flush_tlb_range(struct vm_are
 
 #ifdef DEBUG_TLB
                printk("[tlbrange<%lu,0x%08lx,0x%08lx>]",
-                       ASID_MASK(cpu_context(cpu, mm)), start, end);
+                       cpu_context(cpu, mm) & ASID_MASK, start, end);
 #endif
                local_irq_save(flags);
                size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
                if (size <= current_cpu_data.tlbsize) {
-                       int oldpid = ASID_MASK(read_c0_entryhi());
-                       int newpid = ASID_MASK(cpu_context(cpu, mm));
+                       int oldpid = read_c0_entryhi() & ASID_MASK;
+                       int newpid = cpu_context(cpu, mm) & ASID_MASK;
 
                        start &= PAGE_MASK;
                        end += PAGE_SIZE - 1;
@@ -166,10 +166,10 @@ void local_flush_tlb_page(struct vm_area
 #ifdef DEBUG_TLB
                printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page);
 #endif
-               newpid = ASID_MASK(cpu_context(cpu, vma->vm_mm));
+               newpid = cpu_context(cpu, vma->vm_mm) & ASID_MASK;
                page &= PAGE_MASK;
                local_irq_save(flags);
-               oldpid = ASID_MASK(read_c0_entryhi());
+               oldpid = read_c0_entryhi() & ASID_MASK;
                write_c0_entryhi(page | newpid);
                BARRIER;
                tlb_probe();
@@ -197,10 +197,10 @@ void __update_tlb(struct vm_area_struct
        if (current->active_mm != vma->vm_mm)
                return;
 
-       pid = ASID_MASK(read_c0_entryhi());
+       pid = read_c0_entryhi() & ASID_MASK;
 
 #ifdef DEBUG_TLB
-       if ((pid != ASID_MASK(cpu_context(cpu, vma->vm_mm))) || (cpu_context(cpu, vma->vm_mm) == 0)) {
+       if ((pid != (cpu_context(cpu, vma->vm_mm) & ASID_MASK)) || (cpu_context(cpu, vma->vm_mm) == 0)) {
                printk("update_mmu_cache: Wheee, bogus tlbpid mmpid=%lu tlbpid=%d\n",
                       (cpu_context(cpu, vma->vm_mm)), pid);
        }
@@ -241,7 +241,7 @@ void add_wired_entry(unsigned long entry
 
                local_irq_save(flags);
                /* Save old context and create impossible VPN2 value */
-               old_ctx = ASID_MASK(read_c0_entryhi());
+               old_ctx = read_c0_entryhi() & ASID_MASK;
                old_pagemask = read_c0_pagemask();
                w = read_c0_wired();
                write_c0_wired(w + 1);
@@ -264,7 +264,7 @@ void add_wired_entry(unsigned long entry
 #endif
 
                local_irq_save(flags);
-               old_ctx = ASID_MASK(read_c0_entryhi());
+               old_ctx = read_c0_entryhi() & ASID_MASK;
                write_c0_entrylo0(entrylo0);
                write_c0_entryhi(entryhi);
                write_c0_index(wired);
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -287,7 +287,7 @@ void __update_tlb(struct vm_area_struct
 
        ENTER_CRITICAL(flags);
 
-       pid = ASID_MASK(read_c0_entryhi());
+       pid = read_c0_entryhi() & ASID_MASK;
        address &= (PAGE_MASK << 1);
        write_c0_entryhi(address | pid);
        pgdp = pgd_offset(vma->vm_mm, address);
--- a/arch/mips/mm/tlb-r8k.c
+++ b/arch/mips/mm/tlb-r8k.c
@@ -195,7 +195,7 @@ void __update_tlb(struct vm_area_struct
        if (current->active_mm != vma->vm_mm)
                return;
 
-       pid = ASID_MASK(read_c0_entryhi());
+       pid = read_c0_entryhi() & ASID_MASK;
 
        local_irq_save(flags);
        address &= PAGE_MASK;
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -29,7 +29,6 @@
 #include <linux/init.h>
 #include <linux/cache.h>
 
-#include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 #include <asm/pgtable.h>
 #include <asm/war.h>
@@ -306,48 +305,6 @@ static struct uasm_reloc relocs[128] __c
 static int check_for_high_segbits __cpuinitdata;
 #endif
 
-static void __cpuinit insn_fixup(unsigned int **start, unsigned int **stop,
-                                       unsigned int i_const)
-{
-       unsigned int **p, *ip;
-
-       for (p = start; p < stop; p++) {
-               ip = *p;
-               *ip = (*ip & 0xffff0000) | i_const;
-       }
-       local_flush_icache_range((unsigned long)*p, (unsigned long)((*p) + 1));
-}
-
-#define asid_insn_fixup(section, const)                                        \
-do {                                                                   \
-       extern unsigned int *__start_ ## section;                       \
-       extern unsigned int *__stop_ ## section;                        \
-       insn_fixup(&__start_ ## section, &__stop_ ## section, const);   \
-} while(0)
-
-/*
- * Caller is assumed to flush the caches before the first context switch.
- */
-static void __cpuinit setup_asid(unsigned int inc, unsigned int mask,
-                                unsigned int version_mask,
-                                unsigned int first_version)
-{
-       extern asmlinkage void handle_ri_rdhwr_vivt(void);
-       unsigned long *vivt_exc;
-
-       asid_insn_fixup(__asid_inc, inc);
-       asid_insn_fixup(__asid_mask, mask);
-       asid_insn_fixup(__asid_version_mask, version_mask);
-       asid_insn_fixup(__asid_first_version, first_version);
-
-       /* Patch up the 'handle_ri_rdhwr_vivt' handler. */
-       vivt_exc = (unsigned long *) &handle_ri_rdhwr_vivt;
-       vivt_exc++;
-       *vivt_exc = (*vivt_exc & ~mask) | mask;
-
-       current_cpu_data.asid_cache = first_version;
-}
-
 static int check_for_high_segbits __cpuinitdata;
 
 static unsigned int kscratch_used_mask __cpuinitdata;
@@ -2226,7 +2183,6 @@ void __cpuinit build_tlb_refill_handler(
        case CPU_TX3922:
        case CPU_TX3927:
 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT
-               setup_asid(0x40, 0xfc0, 0xf000, ASID_FIRST_VERSION_R3000);
                if (cpu_has_local_ebase)
                        build_r3000_tlb_refill_handler();
                if (!run_once) {
@@ -2252,11 +2208,6 @@ void __cpuinit build_tlb_refill_handler(
                break;
 
        default:
-#ifndef CONFIG_MIPS_MT_SMTC
-               setup_asid(0x1, 0xff, 0xff00, ASID_FIRST_VERSION_R4000);
-#else
-               setup_asid(0x1, smtc_asid_mask, 0xff00, ASID_FIRST_VERSION_R4000);
-#endif
                if (!run_once) {
                        scratch_reg = allocate_kscratch();
 #ifdef CONFIG_MIPS_PGD_C0_CONTEXT