target/linux/coldfire/patches/062-mcfv4e_cache_split.patch
From 940b4fea5ebfde3abe03c6469a57c01ee961497a Mon Sep 17 00:00:00 2001
From: Kurt Mahan <kmahan@freescale.com>
Date: Wed, 18 Jun 2008 15:20:21 -0600
Subject: [PATCH] Split 547x/548x and 5445x cache routines into separate files.

LTIBName: mcfv4e-cache-split
Signed-off-by: Kurt Mahan <kmahan@freescale.com>
---
 include/asm-m68k/cf_5445x_cacheflush.h |  447 ++++++++++++++++++++++++++++++++
 include/asm-m68k/cf_548x_cacheflush.h  |  259 ++++++++++++++++++
 include/asm-m68k/cf_cacheflush.h       |  244 +-----------------
 3 files changed, 711 insertions(+), 239 deletions(-)
 create mode 100644 include/asm-m68k/cf_5445x_cacheflush.h
 create mode 100644 include/asm-m68k/cf_548x_cacheflush.h

--- /dev/null
+++ b/include/asm-m68k/cf_5445x_cacheflush.h
@@ -0,0 +1,447 @@
+/*
+ * include/asm-m68k/cf_5445x_cacheflush.h - Coldfire 5445x Cache
+ *
+ * Based on include/asm-m68k/cacheflush.h
+ *
+ * Coldfire pieces by:
+ *   Kurt Mahan kmahan@freescale.com
+ *
+ * Copyright Freescale Semiconductor, Inc. 2007, 2008
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+#ifndef M68K_CF_5445x_CACHEFLUSH_H
+#define M68K_CF_5445x_CACHEFLUSH_H
+
+#include <asm/cfcache.h>
+
+/*
+ * Coldfire Cache Model
+ *
+ * The Coldfire processors use a Harvard architecture cache configured
+ * as four-way set associative.  The cache does not implement bus snooping
+ * so cache coherency with other masters must be maintained in software.
+ *
+ * The cache is managed via the CPUSHL instruction in conjunction with
+ * bits set in the CACR (cache control register).  Currently the code
+ * uses the CPUSHL enhancement which adds the ability to
+ * invalidate/clear/push a cacheline by physical address.  This feature
+ * is designated in the Hardware Configuration Register [D1-CPES].
+ *
+ * CACR Bits:
+ *     DPI[28]         cpushl invalidate disable for d-cache
+ *     IDPI[12]        cpushl invalidate disable for i-cache
+ *     SPA[14]         cpushl search by physical address
+ *     IVO[20]         cpushl invalidate only
+ *
+ * Random Terminology:
+ *  * invalidate = reset the cache line's valid bit
+ *  * push = generate a line-sized store of the data if its contents are marked
+ *          as modified (the modified flag is cleared after the store)
+ *  * clear = push + invalidate
+ */
+
+/**
+ * flush_icache - Flush all of the instruction cache
+ */
+static inline void flush_icache(void)
+{
+       asm volatile("nop\n"
+                    "moveq%.l  #0,%%d0\n"
+                    "moveq%.l  #0,%%d1\n"
+                    "move%.l   %%d0,%%a0\n"
+                    "1:\n"
+                    "cpushl    %%ic,(%%a0)\n"
+                    "add%.l    #0x0010,%%a0\n"
+                    "addq%.l   #1,%%d1\n"
+                    "cmpi%.l   %0,%%d1\n"
+                    "bne       1b\n"
+                    "moveq%.l  #0,%%d1\n"
+                    "addq%.l   #1,%%d0\n"
+                    "move%.l   %%d0,%%a0\n"
+                    "cmpi%.l   #4,%%d0\n"
+                    "bne       1b\n"
+                    : : "i" (CACHE_SETS)
+                    : "a0", "d0", "d1");
+}
+
+/**
+ * flush_dcache - Flush all of the data cache
+ */
+static inline void flush_dcache(void)
+{
+       asm volatile("nop\n"
+                    "moveq%.l  #0,%%d0\n"
+                    "moveq%.l  #0,%%d1\n"
+                    "move%.l   %%d0,%%a0\n"
+                    "1:\n"
+                    "cpushl    %%dc,(%%a0)\n"
+                    "add%.l    #0x0010,%%a0\n"
+                    "addq%.l   #1,%%d1\n"
+                    "cmpi%.l   %0,%%d1\n"
+                    "bne       1b\n"
+                    "moveq%.l  #0,%%d1\n"
+                    "addq%.l   #1,%%d0\n"
+                    "move%.l   %%d0,%%a0\n"
+                    "cmpi%.l   #4,%%d0\n"
+                    "bne       1b\n"
+                    : : "i" (CACHE_SETS)
+                    : "a0", "d0", "d1");
+}
+
+/**
+ * flush_bcache - Flush all of both caches
+ */
+static inline void flush_bcache(void)
+{
+       asm volatile("nop\n"
+                    "moveq%.l  #0,%%d0\n"
+                    "moveq%.l  #0,%%d1\n"
+                    "move%.l   %%d0,%%a0\n"
+                    "1:\n"
+                    "cpushl    %%bc,(%%a0)\n"
+                    "add%.l    #0x0010,%%a0\n"
+                    "addq%.l   #1,%%d1\n"
+                    "cmpi%.l   %0,%%d1\n"
+                    "bne       1b\n"
+                    "moveq%.l  #0,%%d1\n"
+                    "addq%.l   #1,%%d0\n"
+                    "move%.l   %%d0,%%a0\n"
+                    "cmpi%.l   #4,%%d0\n"
+                    "bne       1b\n"
+                    : : "i" (CACHE_SETS)
+                    : "a0", "d0", "d1");
+}
+
+/**
+ * cf_cache_clear - invalidate cache
+ * @paddr: starting physical address
+ * @len: number of bytes
+ *
+ * Invalidate cache lines starting at paddr for len bytes.
+ * Those lines are not pushed.
+ */
+static inline void cf_cache_clear(unsigned long paddr, int len)
+{
+       /* number of lines */
+       len = (len + (CACHE_LINE_SIZE-1)) / CACHE_LINE_SIZE;
+       if (len == 0)
+               return;
+
+       /* align on set boundary */
+       paddr &= 0xfffffff0;
+
+       asm volatile("nop\n"
+                    "move%.l   %2,%%d0\n"
+                    "or%.l     %3,%%d0\n"
+                    "movec     %%d0,%%cacr\n"
+                    "move%.l   %0,%%a0\n"
+                    "move%.l   %1,%%d0\n"
+                    "1:\n"
+                    "cpushl    %%bc,(%%a0)\n"
+                    "lea       0x10(%%a0),%%a0\n"
+                    "subq%.l   #1,%%d0\n"
+                    "bne%.b    1b\n"
+                    "movec     %2,%%cacr\n"
+                    : : "a" (paddr), "r" (len),
+                        "r" (shadow_cacr),
+                        "i" (CF_CACR_SPA+CF_CACR_IVO)
+                    : "a0", "d0");
+}
+
+/**
+ * cf_cache_push - Push dirty cache out with no invalidate
+ * @paddr: starting physical address
+ * @len: number of bytes
+ *
+ * Push any dirty lines starting at paddr for len bytes.
+ * Those lines are not invalidated.
+ */
+static inline void cf_cache_push(unsigned long paddr, int len)
+{
+       /* number of lines */
+       len = (len + (CACHE_LINE_SIZE-1)) / CACHE_LINE_SIZE;
+       if (len == 0)
+               return;
+
+       /* align on set boundary */
+       paddr &= 0xfffffff0;
+
+       asm volatile("nop\n"
+                    "move%.l   %2,%%d0\n"
+                    "or%.l     %3,%%d0\n"
+                    "movec     %%d0,%%cacr\n"
+                    "move%.l   %0,%%a0\n"
+                    "move%.l   %1,%%d0\n"
+                    "1:\n"
+                    "cpushl    %%bc,(%%a0)\n"
+                    "lea       0x10(%%a0),%%a0\n"
+                    "subq%.l   #1,%%d0\n"
+                    "bne.b     1b\n"
+                    "movec     %2,%%cacr\n"
+                    : : "a" (paddr), "r" (len),
+                        "r" (shadow_cacr),
+                        "i" (CF_CACR_SPA+CF_CACR_DPI+CF_CACR_IDPI)
+                    : "a0", "d0");
+}
+
+/**
+ * cf_cache_flush - Push dirty cache out and invalidate
+ * @paddr: starting physical address
+ * @len: number of bytes
+ *
+ * Push any dirty lines starting at paddr for len bytes and
+ * invalidate those lines.
+ */
+static inline void cf_cache_flush(unsigned long paddr, int len)
+{
+       /* number of lines */
+       len = (len + (CACHE_LINE_SIZE-1)) / CACHE_LINE_SIZE;
+       if (len == 0)
+               return;
+
+       /* align on set boundary */
+       paddr &= 0xfffffff0;
+
+       asm volatile("nop\n"
+                    "move%.l   %2,%%d0\n"
+                    "or%.l     %3,%%d0\n"
+                    "movec     %%d0,%%cacr\n"
+                    "move%.l   %0,%%a0\n"
+                    "move%.l   %1,%%d0\n"
+                    "1:\n"
+                    "cpushl    %%bc,(%%a0)\n"
+                    "lea       0x10(%%a0),%%a0\n"
+                    "subq%.l   #1,%%d0\n"
+                    "bne.b     1b\n"
+                    "movec     %2,%%cacr\n"
+                    : : "a" (paddr), "r" (len),
+                        "r" (shadow_cacr),
+                        "i" (CF_CACR_SPA)
+                    : "a0", "d0");
+}
+
+/**
+ * cf_cache_flush_range - Push dirty data/inst cache in range out and invalidate
+ * @vstart: starting virtual address
+ * @vend: ending virtual address
+ *
+ * Push any dirty data/instruction lines in the range vstart..vend and
+ * invalidate those lines.
+ */
+static inline void cf_cache_flush_range(unsigned long vstart, unsigned long vend)
+{
+       int len;
+
+       /* align on set boundary */
+       vstart &= 0xfffffff0;
+       vend = PAGE_ALIGN((vend + (CACHE_LINE_SIZE-1))) & 0xfffffff0;
+       len = vend - vstart;
+       if (len == 0)
+               return;
+       vstart = __pa(vstart);
+       vend = vstart + len;
+
+       asm volatile("nop\n"
+                    "move%.l   %2,%%d0\n"
+                    "or%.l     %3,%%d0\n"
+                    "movec     %%d0,%%cacr\n"
+                    "move%.l   %0,%%a0\n"
+                    "move%.l   %1,%%a1\n"
+                    "1:\n"
+                    "cpushl    %%bc,(%%a0)\n"
+                    "lea       0x10(%%a0),%%a0\n"
+                    "cmpa%.l   %%a0,%%a1\n"
+                    "bne.b     1b\n"
+                    "movec     %2,%%cacr\n"
+                    : /* no return */
+                    : "a" (vstart), "a" (vend),
+                      "r" (shadow_cacr),
+                      "i" (CF_CACR_SPA)
+                    : "a0", "a1", "d0");
+}
+
+/**
+ * cf_dcache_flush_range - Push dirty data cache in range out and invalidate
+ * @vstart: starting virtual address
+ * @vend: ending virtual address
+ *
+ * Push any dirty data lines in the range vstart..vend and
+ * invalidate those lines.
+ */
+static inline void cf_dcache_flush_range(unsigned long vstart, unsigned long vend)
+{
+       /* align on set boundary */
+       vstart &= 0xfffffff0;
+       vend = (vend + (CACHE_LINE_SIZE-1)) & 0xfffffff0;
+
+       asm volatile("nop\n"
+                    "move%.l   %2,%%d0\n"
+                    "or%.l     %3,%%d0\n"
+                    "movec     %%d0,%%cacr\n"
+                    "move%.l   %0,%%a0\n"
+                    "move%.l   %1,%%a1\n"
+                    "1:\n"
+                    "cpushl    %%dc,(%%a0)\n"
+                    "lea       0x10(%%a0),%%a0\n"
+                    "cmpa%.l   %%a0,%%a1\n"
+                    "bne.b     1b\n"
+                    "movec     %2,%%cacr\n"
+                    : /* no return */
+                    : "a" (__pa(vstart)), "a" (__pa(vend)),
+                      "r" (shadow_cacr),
+                      "i" (CF_CACR_SPA)
+                    : "a0", "a1", "d0");
+}
+
+/**
+ * cf_icache_flush_range - Push dirty inst cache in range out and invalidate
+ * @vstart: starting virtual address
+ * @vend: ending virtual address
+ *
+ * Push any dirty instruction lines in the range vstart..vend and
+ * invalidate those lines.  This should just be an invalidate, since
+ * the instruction cache should never contain dirty lines.
+ */
+static inline void cf_icache_flush_range(unsigned long vstart, unsigned long vend)
+{
+       /* align on set boundary */
+       vstart &= 0xfffffff0;
+       vend = (vend + (CACHE_LINE_SIZE-1)) & 0xfffffff0;
+
+       asm volatile("nop\n"
+                    "move%.l   %2,%%d0\n"
+                    "or%.l     %3,%%d0\n"
+                    "movec     %%d0,%%cacr\n"
+                    "move%.l   %0,%%a0\n"
+                    "move%.l   %1,%%a1\n"
+                    "1:\n"
+                    "cpushl    %%ic,(%%a0)\n"
+                    "lea       0x10(%%a0),%%a0\n"
+                    "cmpa%.l   %%a0,%%a1\n"
+                    "bne.b     1b\n"
+                    "movec     %2,%%cacr\n"
+                    : /* no return */
+                    : "a" (__pa(vstart)), "a" (__pa(vend)),
+                      "r" (shadow_cacr),
+                      "i" (CF_CACR_SPA)
+                    : "a0", "a1", "d0");
+}
+
+/**
+ * flush_cache_mm - Flush an mm_struct
+ * @mm: mm_struct to flush
+ */
+static inline void flush_cache_mm(struct mm_struct *mm)
+{
+       if (mm == current->mm)
+               flush_bcache();
+}
+
+#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
+
+/**
+ * flush_cache_range - Flush a cache range
+ * @vma: vma struct
+ * @start: Starting address
+ * @end: Ending address
+ *
+ * flush_cache_range must be a macro to avoid a dependency on
+ * linux/mm.h which includes this file.
+ */
+static inline void flush_cache_range(struct vm_area_struct *vma,
+       unsigned long start, unsigned long end)
+{
+       if (vma->vm_mm == current->mm)
+               cf_cache_flush_range(start, end);
+}
+
+/**
+ * flush_cache_page - Flush a page of the cache
+ * @vma: vma struct
+ * @vmaddr: virtual address of the page
+ * @pfn: page number
+ *
+ * flush_cache_page must be a macro to avoid a dependency on
+ * linux/mm.h which includes this file.
+ */
+static inline void flush_cache_page(struct vm_area_struct *vma,
+       unsigned long vmaddr, unsigned long pfn)
+{
+       if (vma->vm_mm == current->mm)
+               cf_cache_flush_range(vmaddr, vmaddr+PAGE_SIZE);
+}
+
+/**
+ * __flush_page_to_ram - Push a page out of the cache
+ * @vaddr: Virtual address at start of page
+ *
+ * Push the page at kernel virtual address *vaddr* and clear
+ * the icache.
+ */
+static inline void __flush_page_to_ram(void *vaddr)
+{
+       asm volatile("nop\n"
+                    "move%.l   %2,%%d0\n"
+                    "or%.l     %3,%%d0\n"
+                    "movec     %%d0,%%cacr\n"
+                    "move%.l   %0,%%d0\n"
+                    "and%.l    #0xfffffff0,%%d0\n"
+                    "move%.l   %%d0,%%a0\n"
+                    "move%.l   %1,%%d0\n"
+                    "1:\n"
+                    "cpushl    %%bc,(%%a0)\n"
+                    "lea       0x10(%%a0),%%a0\n"
+                    "subq%.l   #1,%%d0\n"
+                    "bne.b     1b\n"
+                    "movec     %2,%%cacr\n"
+                    : : "a" (__pa(vaddr)), "i" (PAGE_SIZE / CACHE_LINE_SIZE),
+                        "r" (shadow_cacr), "i" (CF_CACR_SPA)
+                    : "a0", "d0");
+}
+
+/*
+ * Various defines for the kernel.
+ */
+
+extern void cache_clear(unsigned long paddr, int len);
+extern void cache_push(unsigned long paddr, int len);
+extern void flush_icache_range(unsigned long address, unsigned long endaddr);
+
+#define flush_cache_all()                      flush_bcache()
+#define flush_cache_vmap(start, end)           flush_bcache()
+#define flush_cache_vunmap(start, end)         flush_bcache()
+
+#define flush_dcache_range(vstart, vend)       cf_dcache_flush_range(vstart, vend)
+#define flush_dcache_page(page)                        __flush_page_to_ram(page_address(page))
+#define flush_dcache_mmap_lock(mapping)                do { } while (0)
+#define flush_dcache_mmap_unlock(mapping)      do { } while (0)
+
+#define flush_icache_page(vma, page)           __flush_page_to_ram(page_address(page))
+
+/**
+ * copy_to_user_page - Copy memory to user page
+ */
+static inline void copy_to_user_page(struct vm_area_struct *vma,
+                                    struct page *page, unsigned long vaddr,
+                                    void *dst, void *src, int len)
+{
+       memcpy(dst, src, len);
+       cf_cache_flush(page_to_phys(page), PAGE_SIZE);
+}
+
+/**
+ * copy_from_user_page - Copy memory from user page
+ */
+static inline void copy_from_user_page(struct vm_area_struct *vma,
+                                      struct page *page, unsigned long vaddr,
+                                      void *dst, void *src, int len)
+{
+       cf_cache_flush(page_to_phys(page), PAGE_SIZE);
+       memcpy(dst, src, len);
+}
+
+#endif /* M68K_CF_5445x_CACHEFLUSH_H */
--- /dev/null
+++ b/include/asm-m68k/cf_548x_cacheflush.h
@@ -0,0 +1,259 @@
+/*
+ * include/asm-m68k/cf_548x_cacheflush.h - Coldfire 547x/548x Cache
+ *
+ * Based on include/asm-m68k/cacheflush.h
+ *
+ * Coldfire pieces by:
+ *   Kurt Mahan kmahan@freescale.com
+ *
+ * Copyright Freescale Semiconductor, Inc. 2007, 2008
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+#ifndef M68K_CF_548x_CACHEFLUSH_H
+#define M68K_CF_548x_CACHEFLUSH_H
+
+#include <asm/cfcache.h>
+/*
+ * Cache handling functions
+ */
+
+#define flush_icache()                                         \
+({                                                             \
+  unsigned long set;                                           \
+  unsigned long start_set;                                     \
+  unsigned long end_set;                                       \
+                                                               \
+  start_set = 0;                                               \
+  end_set = (unsigned long)LAST_DCACHE_ADDR;                   \
+                                                               \
+  for (set = start_set; set <= end_set; set += (0x10 - 3)) {   \
+    asm volatile("cpushl %%ic,(%0)\n"                          \
+                 "\taddq%.l #1,%0\n"                           \
+                 "\tcpushl %%ic,(%0)\n"                                \
+                 "\taddq%.l #1,%0\n"                           \
+                 "\tcpushl %%ic,(%0)\n"                                \
+                 "\taddq%.l #1,%0\n"                           \
+                 "\tcpushl %%ic,(%0)" : "=a" (set) : "a" (set));               \
+  }                                                            \
+})
+
+#define flush_dcache()                                         \
+({                                                             \
+  unsigned long set;                                           \
+  unsigned long start_set;                                     \
+  unsigned long end_set;                                       \
+                                                               \
+  start_set = 0;                                               \
+  end_set = (unsigned long)LAST_DCACHE_ADDR;                   \
+                                                               \
+  for (set = start_set; set <= end_set; set += (0x10 - 3)) {   \
+    asm volatile("cpushl %%dc,(%0)\n"                          \
+                 "\taddq%.l #1,%0\n"                           \
+                 "\tcpushl %%dc,(%0)\n"                                \
+                 "\taddq%.l #1,%0\n"                           \
+                 "\tcpushl %%dc,(%0)\n"                                \
+                 "\taddq%.l #1,%0\n"                           \
+                 "\tcpushl %%dc,(%0)" : "=a" (set) : "a" (set));               \
+  }                                                            \
+})
+
+#define flush_bcache()                                         \
+({                                                             \
+  unsigned long set;                                           \
+  unsigned long start_set;                                     \
+  unsigned long end_set;                                       \
+                                                               \
+  start_set = 0;                                               \
+  end_set = (unsigned long)LAST_DCACHE_ADDR;                   \
+                                                               \
+  for (set = start_set; set <= end_set; set += (0x10 - 3)) {   \
+    asm volatile("cpushl %%bc,(%0)\n"                          \
+                 "\taddq%.l #1,%0\n"                           \
+                 "\tcpushl %%bc,(%0)\n"                                \
+                 "\taddq%.l #1,%0\n"                           \
+                 "\tcpushl %%bc,(%0)\n"                                \
+                 "\taddq%.l #1,%0\n"                           \
+                 "\tcpushl %%bc,(%0)" : "=a" (set) : "a" (set));               \
+  }                                                            \
+})
+
+/*
+ * invalidate the cache for the specified memory range.
+ * It starts at the physical address specified for
+ * the given number of bytes.
+ */
+extern void cache_clear(unsigned long paddr, int len);
+/*
+ * push any dirty cache in the specified memory range.
+ * It starts at the physical address specified for
+ * the given number of bytes.
+ */
+extern void cache_push(unsigned long paddr, int len);
+
+/*
+ * push and invalidate pages in the specified user virtual
+ * memory range.
+ */
+extern void cache_push_v(unsigned long vaddr, int len);
+
+/* This is needed whenever the virtual mapping of the current
+   process changes.  */
+
+/**
+ * flush_cache_mm - Flush an mm_struct
+ * @mm: mm_struct to flush
+ */
+static inline void flush_cache_mm(struct mm_struct *mm)
+{
+       if (mm == current->mm)
+               flush_bcache();
+}
+
+#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
+
+#define flush_cache_all()              flush_bcache()
+
+/**
+ * flush_cache_range - Flush a cache range
+ * @vma: vma struct
+ * @start: Starting address
+ * @end: Ending address
+ *
+ * flush_cache_range must be a macro to avoid a dependency on
+ * linux/mm.h which includes this file.
+ */
+static inline void flush_cache_range(struct vm_area_struct *vma,
+       unsigned long start, unsigned long end)
+{
+       if (vma->vm_mm == current->mm)
+               flush_bcache();
+//             cf_cache_flush_range(start, end);
+}
+
+/**
+ * flush_cache_page - Flush a page of the cache
+ * @vma: vma struct
+ * @vmaddr: virtual address of the page
+ * @pfn: page number
+ *
+ * flush_cache_page must be a macro to avoid a dependency on
+ * linux/mm.h which includes this file.
+ */
+static inline void flush_cache_page(struct vm_area_struct *vma,
+       unsigned long vmaddr, unsigned long pfn)
+{
+       if (vma->vm_mm == current->mm)
+               flush_bcache();
+//             cf_cache_flush_range(vmaddr, vmaddr+PAGE_SIZE);
+}
+
+/* Push the page at kernel virtual address and clear the icache */
+/* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
+#define flush_page_to_ram(page) __flush_page_to_ram((void *) page_address(page))
+extern inline void __flush_page_to_ram(void *address)
+{
+  unsigned long set;
+  unsigned long start_set;
+  unsigned long end_set;
+  unsigned long addr = (unsigned long) address;
+
+  addr &= ~(PAGE_SIZE - 1); /* round down to page start address */
+
+  start_set = addr & _ICACHE_SET_MASK;
+  end_set = (addr + PAGE_SIZE-1) & _ICACHE_SET_MASK;
+
+  if (start_set > end_set) {
+    /* from the beginning to the lowest address */
+    for (set = 0; set <= end_set; set += (0x10 - 3)) {
+      asm volatile("cpushl %%bc,(%0)\n"
+                   "\taddq%.l #1,%0\n"
+                   "\tcpushl %%bc,(%0)\n"
+                   "\taddq%.l #1,%0\n"
+                   "\tcpushl %%bc,(%0)\n"
+                   "\taddq%.l #1,%0\n"
+                   "\tcpushl %%bc,(%0)" : "=a" (set) : "a" (set));
+    }
+    /* next loop will finish the cache, i.e. pass the hole */
+    end_set = LAST_ICACHE_ADDR;
+  }
+  for (set = start_set; set <= end_set; set += (0x10 - 3)) {
+    asm volatile("cpushl %%bc,(%0)\n"
+                 "\taddq%.l #1,%0\n"
+                 "\tcpushl %%bc,(%0)\n"
+                 "\taddq%.l #1,%0\n"
+                 "\tcpushl %%bc,(%0)\n"
+                 "\taddq%.l #1,%0\n"
+                 "\tcpushl %%bc,(%0)" : "=a" (set) : "a" (set));
+  }
+}
+
+/* Use __flush_page_to_ram() for flush_dcache_page all values are same - MW */
+#define flush_dcache_page(page)                        \
+       __flush_page_to_ram((void *) page_address(page))
+#define flush_icache_page(vma,pg)              \
+       __flush_page_to_ram((void *) page_address(pg))
+#define flush_icache_user_range(adr,len)       do { } while (0)
+/* NL */
+#define flush_icache_user_page(vma,page,addr,len)      do { } while (0)
+
+/* Push n pages at kernel virtual address and clear the icache */
+/* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
+extern inline void flush_icache_range (unsigned long address,
+                                      unsigned long endaddr)
+{
+  unsigned long set;
+  unsigned long start_set;
+  unsigned long end_set;
+
+  start_set = address & _ICACHE_SET_MASK;
+  end_set = endaddr & _ICACHE_SET_MASK;
+
+  if (start_set > end_set) {
+    /* from the beginning to the lowest address */
+    for (set = 0; set <= end_set; set += (0x10 - 3)) {
+      asm volatile("cpushl %%ic,(%0)\n"
+                   "\taddq%.l #1,%0\n"
+                   "\tcpushl %%ic,(%0)\n"
+                   "\taddq%.l #1,%0\n"
+                   "\tcpushl %%ic,(%0)\n"
+                   "\taddq%.l #1,%0\n"
+                   "\tcpushl %%ic,(%0)" : "=a" (set) : "a" (set));
+    }
+    /* next loop will finish the cache, i.e. pass the hole */
+    end_set = LAST_ICACHE_ADDR;
+  }
+  for (set = start_set; set <= end_set; set += (0x10 - 3)) {
+    asm volatile("cpushl %%ic,(%0)\n"
+                 "\taddq%.l #1,%0\n"
+                 "\tcpushl %%ic,(%0)\n"
+                 "\taddq%.l #1,%0\n"
+                 "\tcpushl %%ic,(%0)\n"
+                 "\taddq%.l #1,%0\n"
+                 "\tcpushl %%ic,(%0)" : "=a" (set) : "a" (set));
+  }
+}
+
+static inline void copy_to_user_page(struct vm_area_struct *vma,
+                                    struct page *page, unsigned long vaddr,
+                                    void *dst, void *src, int len)
+{
+       memcpy(dst, src, len);
+       flush_icache_user_page(vma, page, vaddr, len);
+}
+static inline void copy_from_user_page(struct vm_area_struct *vma,
+                                      struct page *page, unsigned long vaddr,
+                                      void *dst, void *src, int len)
+{
+       memcpy(dst, src, len);
+}
+
+#define flush_cache_vmap(start, end)           flush_cache_all()
+#define flush_cache_vunmap(start, end)         flush_cache_all()
+#define flush_dcache_mmap_lock(mapping)                do { } while (0)
+#define flush_dcache_mmap_unlock(mapping)      do { } while (0)
+
+#endif /* M68K_CF_548x_CACHEFLUSH_H */
--- a/include/asm-m68k/cf_cacheflush.h
+++ b/include/asm-m68k/cf_cacheflush.h
@@ -1,244 +1,10 @@
 #ifndef M68K_CF_CACHEFLUSH_H
 #define M68K_CF_CACHEFLUSH_H
 
-#include <asm/cfcache.h>
-/*
- * Cache handling functions
- */
-
-#define flush_icache()                                         \
-({                                                             \
-  unsigned long set;                                           \
-  unsigned long start_set;                                     \
-  unsigned long end_set;                                       \
-                                                               \
-  start_set = 0;                                               \
-  end_set = (unsigned long)LAST_DCACHE_ADDR;                   \
-                                                               \
-  for (set = start_set; set <= end_set; set += (0x10 - 3)) {   \
-    asm volatile("cpushl %%ic,(%0)\n"                          \
-                 "\taddq%.l #1,%0\n"                           \
-                 "\tcpushl %%ic,(%0)\n"                                \
-                 "\taddq%.l #1,%0\n"                           \
-                 "\tcpushl %%ic,(%0)\n"                                \
-                 "\taddq%.l #1,%0\n"                           \
-                 "\tcpushl %%ic,(%0)" : "=a" (set) : "a" (set));               \
-  }                                                            \
-})
-
-#define flush_dcache()                                         \
-({                                                             \
-  unsigned long set;                                           \
-  unsigned long start_set;                                     \
-  unsigned long end_set;                                       \
-                                                               \
-  start_set = 0;                                               \
-  end_set = (unsigned long)LAST_DCACHE_ADDR;                   \
-                                                               \
-  for (set = start_set; set <= end_set; set += (0x10 - 3)) {   \
-    asm volatile("cpushl %%dc,(%0)\n"                          \
-                 "\taddq%.l #1,%0\n"                           \
-                 "\tcpushl %%dc,(%0)\n"                                \
-                 "\taddq%.l #1,%0\n"                           \
-                 "\tcpushl %%dc,(%0)\n"                                \
-                 "\taddq%.l #1,%0\n"                           \
-                 "\tcpushl %%dc,(%0)" : "=a" (set) : "a" (set));               \
-  }                                                            \
-})
-
-#define flush_bcache()                                         \
-({                                                             \
-  unsigned long set;                                           \
-  unsigned long start_set;                                     \
-  unsigned long end_set;                                       \
-                                                               \
-  start_set = 0;                                               \
-  end_set = (unsigned long)LAST_DCACHE_ADDR;                   \
-                                                               \
-  for (set = start_set; set <= end_set; set += (0x10 - 3)) {   \
-    asm volatile("cpushl %%bc,(%0)\n"                          \
-                 "\taddq%.l #1,%0\n"                           \
-                 "\tcpushl %%bc,(%0)\n"                                \
-                 "\taddq%.l #1,%0\n"                           \
-                 "\tcpushl %%bc,(%0)\n"                                \
-                 "\taddq%.l #1,%0\n"                           \
-                 "\tcpushl %%bc,(%0)" : "=a" (set) : "a" (set));               \
-  }                                                            \
-})
-
-/*
- * invalidate the cache for the specified memory range.
- * It starts at the physical address specified for
- * the given number of bytes.
- */
-extern void cache_clear(unsigned long paddr, int len);
-/*
- * push any dirty cache in the specified memory range.
- * It starts at the physical address specified for
- * the given number of bytes.
- */
-extern void cache_push(unsigned long paddr, int len);
-
-/*
- * push and invalidate pages in the specified user virtual
- * memory range.
- */
-extern void cache_push_v(unsigned long vaddr, int len);
-
-/* This is needed whenever the virtual mapping of the current
-   process changes.  */
-
-/**
- * flush_cache_mm - Flush an mm_struct
- * @mm: mm_struct to flush
- */
-static inline void flush_cache_mm(struct mm_struct *mm)
-{
-       if (mm == current->mm)
-               flush_bcache();
-}
-
-#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
-
-#define flush_cache_all()              flush_bcache()
-
-/**
- * flush_cache_range - Flush a cache range
- * @vma: vma struct
- * @start: Starting address
- * @end: Ending address
- *
- * flush_cache_range must be a macro to avoid a dependency on
- * linux/mm.h which includes this file.
- */
-static inline void flush_cache_range(struct vm_area_struct *vma,
-       unsigned long start, unsigned long end)
-{
-       if (vma->vm_mm == current->mm)
-               flush_bcache();
-//             cf_cache_flush_range(start, end);
-}
-
-/**
- * flush_cache_page - Flush a page of the cache
- * @vma: vma struct
- * @vmaddr:
- * @pfn: page numer
- *
- * flush_cache_page must be a macro to avoid a dependency on
- * linux/mm.h which includes this file.
- */
-static inline void flush_cache_page(struct vm_area_struct *vma,
-       unsigned long vmaddr, unsigned long pfn)
-{
-       if (vma->vm_mm == current->mm)
-               flush_bcache();
-//             cf_cache_flush_range(vmaddr, vmaddr+PAGE_SIZE);
-}
-
-/* Push the page at kernel virtual address and clear the icache */
-/* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
-#define flush_page_to_ram(page) __flush_page_to_ram((void *) page_address(page))
-extern inline void __flush_page_to_ram(void *address)
-{
-  unsigned long set;
-  unsigned long start_set;
-  unsigned long end_set;
-  unsigned long addr = (unsigned long) address;
-
-  addr &= ~(PAGE_SIZE - 1); /* round down to page start address */
-
-  start_set = addr & _ICACHE_SET_MASK;
-  end_set = (addr + PAGE_SIZE-1) & _ICACHE_SET_MASK;
-
-  if (start_set > end_set) {
-    /* from the begining to the lowest address */
-    for (set = 0; set <= end_set; set += (0x10 - 3)) {
-      asm volatile("cpushl %%bc,(%0)\n"
-                   "\taddq%.l #1,%0\n"
-                   "\tcpushl %%bc,(%0)\n"
-                   "\taddq%.l #1,%0\n"
-                   "\tcpushl %%bc,(%0)\n"
-                   "\taddq%.l #1,%0\n"
-                   "\tcpushl %%bc,(%0)" : "=a" (set) : "a" (set));
-    }
-    /* next loop will finish the cache ie pass the hole */
-    end_set = LAST_ICACHE_ADDR;    
-  }
-  for (set = start_set; set <= end_set; set += (0x10 - 3)) {
-    asm volatile("cpushl %%bc,(%0)\n"
-                 "\taddq%.l #1,%0\n"
-                 "\tcpushl %%bc,(%0)\n"
-                 "\taddq%.l #1,%0\n"
-                 "\tcpushl %%bc,(%0)\n"
-                 "\taddq%.l #1,%0\n"
-                 "\tcpushl %%bc,(%0)" : "=a" (set) : "a" (set));
-  }
-}
-
-/* Use __flush_page_to_ram() for flush_dcache_page all values are same - MW */
-#define flush_dcache_page(page)                        \
-       __flush_page_to_ram((void *) page_address(page))
-#define flush_icache_page(vma,pg)              \
-       __flush_page_to_ram((void *) page_address(pg))
-#define flush_icache_user_range(adr,len)       do { } while (0)
-/* NL */
-#define flush_icache_user_page(vma,page,addr,len)      do { } while (0)
-
-/* Push n pages at kernel virtual address and clear the icache */
-/* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
-extern inline void flush_icache_range (unsigned long address,
-                                      unsigned long endaddr)
-{
-  unsigned long set;
-  unsigned long start_set;
-  unsigned long end_set;
-
-  start_set = address & _ICACHE_SET_MASK;
-  end_set = endaddr & _ICACHE_SET_MASK;
-
-  if (start_set > end_set) {
-    /* from the begining to the lowest address */
-    for (set = 0; set <= end_set; set += (0x10 - 3)) {
-      asm volatile("cpushl %%ic,(%0)\n"
-                   "\taddq%.l #1,%0\n"
-                   "\tcpushl %%ic,(%0)\n"
-                   "\taddq%.l #1,%0\n"
-                   "\tcpushl %%ic,(%0)\n"
-                   "\taddq%.l #1,%0\n"
-                   "\tcpushl %%ic,(%0)" : "=a" (set) : "a" (set));
-    }
-    /* next loop will finish the cache ie pass the hole */
-    end_set = LAST_ICACHE_ADDR;    
-  }
-  for (set = start_set; set <= end_set; set += (0x10 - 3)) {
-    asm volatile("cpushl %%ic,(%0)\n"
-                 "\taddq%.l #1,%0\n"
-                 "\tcpushl %%ic,(%0)\n"
-                 "\taddq%.l #1,%0\n"
-                 "\tcpushl %%ic,(%0)\n"
-                 "\taddq%.l #1,%0\n"
-                 "\tcpushl %%ic,(%0)" : "=a" (set) : "a" (set));
-  }
-}
-
-static inline void copy_to_user_page(struct vm_area_struct *vma,
-                                    struct page *page, unsigned long vaddr,
-                                    void *dst, void *src, int len)
-{
-       memcpy(dst, src, len);
-       flush_icache_user_page(vma, page, vaddr, len);
-}
-static inline void copy_from_user_page(struct vm_area_struct *vma,
-                                      struct page *page, unsigned long vaddr,
-                                      void *dst, void *src, int len)
-{
-       memcpy(dst, src, len);
-}
-
-#define flush_cache_vmap(start, end)           flush_cache_all()
-#define flush_cache_vunmap(start, end)         flush_cache_all()
-#define flush_dcache_mmap_lock(mapping)                do { } while (0)
-#define flush_dcache_mmap_unlock(mapping)      do { } while (0)
+#ifdef CONFIG_M5445X
+#include "cf_5445x_cacheflush.h"
+#else
+#include "cf_548x_cacheflush.h"
+#endif
 
 #endif /* M68K_CF_CACHEFLUSH_H */