From 2c58080407554e1bac8fd50d23cb02420524caed Mon Sep 17 00:00:00 2001
From: Felix Fietkau <nbd@openwrt.org>
Date: Mon, 12 Aug 2013 12:50:22 +0200
Subject: [PATCH] MIPS: partially inline dma ops

Several DMA ops are no-ops on many platforms, and the indirection through
the mips_dma_map_ops function table is causing the compiler to emit
unnecessary code.

Inlining visibly improves network performance in my tests (on a 24Kc
based system), and also slightly reduces code size of a few drivers.
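
The win comes from the compiler seeing, at every call site, that no ops
table is installed. A rough sketch of the idea (illustrative only, not
the literal code from this patch -- see the dma-mapping.h changes below
for the real definitions):

    /* With CONFIG_SYS_HAS_DMA_OPS disabled, get_dma_ops() folds to NULL: */
    static inline struct dma_map_ops *get_dma_ops(struct device *dev)
    {
    	return NULL;
    }

    /* ...so in each inlined wrapper the indirect call is dead code and
     * only the platform fast path survives, e.g. in dma_map_single(): */
    if (ops)
    	addr = ops->map_page(dev, page, offset, size, dir, attrs);
    else
    	addr = plat_map_dma_mem_page(dev, page) + offset;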
Signed-off-by: Felix Fietkau <nbd@openwrt.org>
---
 arch/mips/Kconfig                   |   4 +
 arch/mips/include/asm/dma-mapping.h | 360 +++++++++++++++++++++++++++++++++++-
 arch/mips/mm/dma-default.c          | 163 ++--------------
 3 files changed, 373 insertions(+), 154 deletions(-)
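
(Note, not for the commit log: since the wrappers are now expanded into
every caller, the helpers they rely on must be visible to modules as
well -- hence the EXPORT_SYMBOL() additions in dma-default.c below. An
illustrative sketch of what a modular driver ends up referencing on a
non-coherent platform, assuming no per-device ops are installed:

	/* dma_map_single() in a module now expands to roughly: */
	if (!plat_device_is_coherent(dev))
		__dma_sync(page, offset, size, dir);	/* needs EXPORT_SYMBOL */
	addr = plat_map_dma_mem_page(dev, page) + offset;
)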
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1431,6 +1431,7 @@ config CPU_CAVIUM_OCTEON
 	select USB_EHCI_BIG_ENDIAN_MMIO
+	select SYS_HAS_DMA_OPS
 	help
 	  The Cavium Octeon processor is a highly integrated chip containing
 	  many ethernet hardware widgets for networking tasks. The processor
@@ -1651,6 +1652,9 @@ config SYS_HAS_CPU_XLR
 config SYS_HAS_CPU_XLP
 	bool
 
+config SYS_HAS_DMA_OPS
+	bool
+
 #
 # CPU may reorder R->R, R->W, W->R, W->W
 # Reordering beyond LL and SC is handled in WEAK_REORDERING_BEYOND_LLSC
--- a/arch/mips/include/asm/dma-mapping.h
+++ b/arch/mips/include/asm/dma-mapping.h
 #ifndef _ASM_DMA_MAPPING_H
 #define _ASM_DMA_MAPPING_H
 
+#include <linux/kmemcheck.h>
+#include <linux/bug.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-debug.h>
+#include <linux/dma-attrs.h>
+
 #include <asm/scatterlist.h>
 #include <asm/dma-coherence.h>
 #include <asm/cache.h>
 
 extern struct dma_map_ops *mips_dma_map_ops;
 
+void __dma_sync(struct page *page, unsigned long offset, size_t size,
+		enum dma_data_direction direction);
+void *mips_dma_alloc_coherent(struct device *dev, size_t size,
+			      dma_addr_t *dma_handle, gfp_t gfp,
+			      struct dma_attrs *attrs);
+void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
+			    dma_addr_t dma_handle, struct dma_attrs *attrs);
+
 static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 {
+#ifdef CONFIG_SYS_HAS_DMA_OPS
 	if (dev && dev->archdata.dma_ops)
 		return dev->archdata.dma_ops;
 	else
 		return mips_dma_map_ops;
+#else
+	return NULL;
+#endif
 }
 
+/*
+ * Warning on the terminology - Linux calls an uncached area coherent;
+ * MIPS terminology calls memory areas with hardware maintained coherency
+ * coherent.
+ */
+
+static inline int cpu_is_noncoherent_r10000(struct device *dev)
+{
+#ifndef CONFIG_SYS_HAS_CPU_R10000
+	return 0;
+#endif
+	return !plat_device_is_coherent(dev) &&
+	       (current_cpu_type() == CPU_R10000 ||
+		current_cpu_type() == CPU_R12000);
+}
+
+static inline struct page *dma_addr_to_page(struct device *dev,
+	dma_addr_t dma_addr)
+{
+	return pfn_to_page(
+		plat_dma_addr_to_phys(dev, dma_addr) >> PAGE_SHIFT);
+}
+
 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
@@ -30,12 +71,309 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 
 static inline void dma_mark_clean(void *addr, size_t size) {}
 
-#include <asm-generic/dma-mapping-common.h>
+static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
+					      size_t size,
+					      enum dma_data_direction dir,
+					      struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+	unsigned long offset = (unsigned long)ptr & ~PAGE_MASK;
+	struct page *page = virt_to_page(ptr);
+	dma_addr_t addr;
+
+	kmemcheck_mark_initialized(ptr, size);
+	BUG_ON(!valid_dma_direction(dir));
+	if (ops) {
+		addr = ops->map_page(dev, page, offset, size, dir, attrs);
+	} else {
+		if (!plat_device_is_coherent(dev))
+			__dma_sync(page, offset, size, dir);
+
+		addr = plat_map_dma_mem_page(dev, page) + offset;
+	}
+	debug_dma_map_page(dev, page, offset, size, dir, addr, true);
+	return addr;
+}
+
+static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
+					  size_t size,
+					  enum dma_data_direction dir,
+					  struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+
+	BUG_ON(!valid_dma_direction(dir));
+	if (ops) {
+		ops->unmap_page(dev, addr, size, dir, attrs);
+	} else {
+		if (cpu_is_noncoherent_r10000(dev))
+			__dma_sync(dma_addr_to_page(dev, addr),
+				   addr & ~PAGE_MASK, size, dir);
+
+		plat_unmap_dma_mem(dev, addr, size, dir);
+	}
+	debug_dma_unmap_page(dev, addr, size, dir, true);
+}
+
+static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
+				   int nents, enum dma_data_direction dir,
+				   struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+	int i, ents;
+	struct scatterlist *s;
+
+	for_each_sg(sg, s, nents, i)
+		kmemcheck_mark_initialized(sg_virt(s), s->length);
+	BUG_ON(!valid_dma_direction(dir));
+	if (ops) {
+		ents = ops->map_sg(dev, sg, nents, dir, attrs);
+	} else {
+		for_each_sg(sg, s, nents, i) {
+			struct page *page = sg_page(s);
+
+			if (!plat_device_is_coherent(dev))
+				__dma_sync(page, s->offset, s->length, dir);
+			s->dma_address =
+				plat_map_dma_mem_page(dev, page) + s->offset;
+		}
+		ents = nents;
+	}
+	debug_dma_map_sg(dev, sg, nents, ents, dir);
+
+	return ents;
+}
+
+static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
+				      int nents, enum dma_data_direction dir,
+				      struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+	struct scatterlist *s;
+	int i;
+
+	BUG_ON(!valid_dma_direction(dir));
+	debug_dma_unmap_sg(dev, sg, nents, dir);
+	if (ops) {
+		ops->unmap_sg(dev, sg, nents, dir, attrs);
+		return;
+	}
+
+	for_each_sg(sg, s, nents, i) {
+		if (!plat_device_is_coherent(dev) && dir != DMA_TO_DEVICE)
+			__dma_sync(sg_page(s), s->offset, s->length, dir);
+		plat_unmap_dma_mem(dev, s->dma_address, s->length, dir);
+	}
+}
+
+static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
+				      size_t offset, size_t size,
+				      enum dma_data_direction dir)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+	dma_addr_t addr;
+
+	kmemcheck_mark_initialized(page_address(page) + offset, size);
+	BUG_ON(!valid_dma_direction(dir));
+	if (ops) {
+		addr = ops->map_page(dev, page, offset, size, dir, NULL);
+	} else {
+		if (!plat_device_is_coherent(dev))
+			__dma_sync(page, offset, size, dir);
+
+		addr = plat_map_dma_mem_page(dev, page) + offset;
+	}
+	debug_dma_map_page(dev, page, offset, size, dir, addr, false);
+
+	return addr;
+}
+
+static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
+				  size_t size, enum dma_data_direction dir)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+
+	BUG_ON(!valid_dma_direction(dir));
+	if (ops) {
+		ops->unmap_page(dev, addr, size, dir, NULL);
+	} else {
+		if (cpu_is_noncoherent_r10000(dev))
+			__dma_sync(dma_addr_to_page(dev, addr),
+				   addr & ~PAGE_MASK, size, dir);
+
+		plat_unmap_dma_mem(dev, addr, size, dir);
+	}
+	debug_dma_unmap_page(dev, addr, size, dir, false);
+}
+
+static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
+					   size_t size,
+					   enum dma_data_direction dir)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+
+	BUG_ON(!valid_dma_direction(dir));
+	if (ops)
+		ops->sync_single_for_cpu(dev, addr, size, dir);
+	else if (cpu_is_noncoherent_r10000(dev))
+		__dma_sync(dma_addr_to_page(dev, addr),
+			   addr & ~PAGE_MASK, size, dir);
+	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
+}
+
+static inline void dma_sync_single_for_device(struct device *dev,
+					      dma_addr_t addr, size_t size,
+					      enum dma_data_direction dir)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+
+	BUG_ON(!valid_dma_direction(dir));
+	if (ops)
+		ops->sync_single_for_device(dev, addr, size, dir);
+	else if (!plat_device_is_coherent(dev))
+		__dma_sync(dma_addr_to_page(dev, addr),
+			   addr & ~PAGE_MASK, size, dir);
+	debug_dma_sync_single_for_device(dev, addr, size, dir);
+}
+
+static inline void dma_sync_single_range_for_cpu(struct device *dev,
+						 dma_addr_t addr,
+						 unsigned long offset,
+						 size_t size,
+						 enum dma_data_direction dir)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+
+	BUG_ON(!valid_dma_direction(dir));
+	if (ops)
+		ops->sync_single_for_cpu(dev, addr + offset, size, dir);
+	else if (cpu_is_noncoherent_r10000(dev))
+		__dma_sync(dma_addr_to_page(dev, addr + offset),
+			   (addr + offset) & ~PAGE_MASK, size, dir);
+	debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
+}
+
+static inline void dma_sync_single_range_for_device(struct device *dev,
+						    dma_addr_t addr,
+						    unsigned long offset,
+						    size_t size,
+						    enum dma_data_direction dir)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+
+	BUG_ON(!valid_dma_direction(dir));
+	if (ops)
+		ops->sync_single_for_device(dev, addr + offset, size, dir);
+	else if (!plat_device_is_coherent(dev))
+		__dma_sync(dma_addr_to_page(dev, addr + offset),
+			   (addr + offset) & ~PAGE_MASK, size, dir);
+	debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
+}
+
+static inline void
+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+		    int nelems, enum dma_data_direction dir)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+	struct scatterlist *s;
+	int i;
+
+	BUG_ON(!valid_dma_direction(dir));
+	if (ops)
+		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
+	else if (cpu_is_noncoherent_r10000(dev)) {
+		for_each_sg(sg, s, nelems, i)
+			__dma_sync(sg_page(s), s->offset, s->length, dir);
+	}
+	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
+}
+
+static inline void
+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+		       int nelems, enum dma_data_direction dir)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+	struct scatterlist *s;
+	int i;
+
+	BUG_ON(!valid_dma_direction(dir));
+	if (ops)
+		ops->sync_sg_for_device(dev, sg, nelems, dir);
+	else if (!plat_device_is_coherent(dev)) {
+		for_each_sg(sg, s, nelems, i)
+			__dma_sync(sg_page(s), s->offset, s->length, dir);
+	}
+	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
+}
+
+#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
+#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
+#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
+#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)
+
+extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+			   void *cpu_addr, dma_addr_t dma_addr, size_t size);
+
+/**
+ * dma_mmap_attrs - map a coherent DMA allocation into user space
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @vma: vm_area_struct describing requested user mapping
+ * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
+ * @handle: device-view address returned from dma_alloc_attrs
+ * @size: size of memory originally requested in dma_alloc_attrs
+ * @attrs: attributes of mapping properties requested in dma_alloc_attrs
+ *
+ * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
+ * into user space. The coherent DMA buffer must not be freed by the
+ * driver until the user space mapping has been released.
+ */
+static inline int
+dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
+	       dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+
+	if (ops && ops->mmap)
+		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
+	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
+}
+
+#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)
+
+static inline int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
+					void *cpu_addr, dma_addr_t dma_addr, size_t size)
+{
+	DEFINE_DMA_ATTRS(attrs);
+	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
+}
+
+int
+dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+		       void *cpu_addr, dma_addr_t dma_addr, size_t size);
+
+static inline int
+dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
+		      dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+
+	if (ops && ops->get_sgtable)
+		return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
+					attrs);
+	return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
+}
+
+#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, NULL)
+
 static inline int dma_supported(struct device *dev, u64 mask)
 {
 	struct dma_map_ops *ops = get_dma_ops(dev);
-	return ops->dma_supported(dev, mask);
+	if (ops)
+		return ops->dma_supported(dev, mask);
+	return plat_dma_supported(dev, mask);
 }
 
 static inline int dma_mapping_error(struct device *dev, u64 mask)
@@ -43,7 +381,9 @@ static inline int dma_mapping_error(struct device *dev, u64 mask)
 	struct dma_map_ops *ops = get_dma_ops(dev);
 
 	debug_dma_mapping_error(dev, mask);
-	return ops->mapping_error(dev, mask);
+	if (ops)
+		return ops->mapping_error(dev, mask);
+	return 0;
 }
 
@@ -69,7 +409,11 @@ static inline void *dma_alloc_attrs(struct device *dev, size_t size,
 	void *ret;
 	struct dma_map_ops *ops = get_dma_ops(dev);
 
-	ret = ops->alloc(dev, size, dma_handle, gfp, attrs);
+	if (ops)
+		ret = ops->alloc(dev, size, dma_handle, gfp, attrs);
+	else
+		ret = mips_dma_alloc_coherent(dev, size, dma_handle, gfp,
+					      attrs);
 
 	debug_dma_alloc_coherent(dev, size, *dma_handle, ret);
 
@@ -84,7 +428,10 @@ static inline void dma_free_attrs(struct device *dev, size_t size,
 	struct dma_map_ops *ops = get_dma_ops(dev);
 
-	ops->free(dev, size, vaddr, dma_handle, attrs);
+	if (ops)
+		ops->free(dev, size, vaddr, dma_handle, attrs);
+	else
+		mips_dma_free_coherent(dev, size, vaddr, dma_handle, attrs);
 
 	debug_dma_free_coherent(dev, size, vaddr, dma_handle);
 }
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
 #ifdef CONFIG_DMA_MAYBE_COHERENT
 int coherentio = 0;	/* User defined DMA coherency from command line. */
-EXPORT_SYMBOL_GPL(coherentio);
+EXPORT_SYMBOL(coherentio);
 int hw_coherentio = 0;	/* Actual hardware supported DMA coherency setting. */
 
 static int __init setcoherentio(char *str)
@@ -44,13 +44,6 @@ static int __init setnocoherentio(char *str)
 early_param("nocoherentio", setnocoherentio);
 #endif
 
-static inline struct page *dma_addr_to_page(struct device *dev,
-	dma_addr_t dma_addr)
-{
-	return pfn_to_page(
-		plat_dma_addr_to_phys(dev, dma_addr) >> PAGE_SHIFT);
-}
-
 /*
  * The affected CPUs below in 'cpu_needs_post_dma_flush()' can
  * speculatively fill random cachelines with stale data at any time,
@@ -123,8 +116,9 @@ void *dma_alloc_noncoherent(struct device *dev, size_t size,
 }
 EXPORT_SYMBOL(dma_alloc_noncoherent);
 
-static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
-	dma_addr_t * dma_handle, gfp_t gfp, struct dma_attrs *attrs)
+void *mips_dma_alloc_coherent(struct device *dev, size_t size,
+			      dma_addr_t *dma_handle, gfp_t gfp,
+			      struct dma_attrs *attrs)
 {
@@ -148,6 +142,7 @@ static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
 
 	return ret;
 }
+EXPORT_SYMBOL(mips_dma_alloc_coherent);
 
 void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
@@ -158,8 +153,8 @@ void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
 }
 EXPORT_SYMBOL(dma_free_noncoherent);
 
-static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
-	dma_addr_t dma_handle, struct dma_attrs *attrs)
+void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
+	dma_addr_t dma_handle, struct dma_attrs *attrs)
 {
 	unsigned long addr = (unsigned long) vaddr;
 	int order = get_order(size);
@@ -174,6 +169,7 @@ static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
 
 	free_pages(addr, get_order(size));
 }
+EXPORT_SYMBOL(mips_dma_free_coherent);
 
 static inline void __dma_sync_virtual(void *addr, size_t size,
 	enum dma_data_direction direction)
@@ -202,8 +198,8 @@ static inline void __dma_sync_virtual(void *addr, size_t size,
  * If highmem is not configured then the bulk of this loop gets
  * optimized out.
  */
-static inline void __dma_sync(struct page *page,
-	unsigned long offset, size_t size, enum dma_data_direction direction)
+void __dma_sync(struct page *page, unsigned long offset, size_t size,
+	enum dma_data_direction direction)
 {
@@ -233,108 +229,7 @@ static inline void __dma_sync(struct page *page,
 	} while (left);
 }
 
-static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
-	size_t size, enum dma_data_direction direction, struct dma_attrs *attrs)
-{
-	if (cpu_needs_post_dma_flush(dev))
-		__dma_sync(dma_addr_to_page(dev, dma_addr),
-			   dma_addr & ~PAGE_MASK, size, direction);
-
-	plat_unmap_dma_mem(dev, dma_addr, size, direction);
-}
-
-static int mips_dma_map_sg(struct device *dev, struct scatterlist *sg,
-	int nents, enum dma_data_direction direction, struct dma_attrs *attrs)
-{
-	int i;
-
-	for (i = 0; i < nents; i++, sg++) {
-		if (!plat_device_is_coherent(dev))
-			__dma_sync(sg_page(sg), sg->offset, sg->length,
-				   direction);
-		sg->dma_address = plat_map_dma_mem_page(dev, sg_page(sg)) +
-				  sg->offset;
-	}
-
-	return nents;
-}
-
-static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page,
-	unsigned long offset, size_t size, enum dma_data_direction direction,
-	struct dma_attrs *attrs)
-{
-	if (!plat_device_is_coherent(dev))
-		__dma_sync(page, offset, size, direction);
-
-	return plat_map_dma_mem_page(dev, page) + offset;
-}
-
-static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
-	int nhwentries, enum dma_data_direction direction,
-	struct dma_attrs *attrs)
-{
-	int i;
-
-	for (i = 0; i < nhwentries; i++, sg++) {
-		if (!plat_device_is_coherent(dev) &&
-		    direction != DMA_TO_DEVICE)
-			__dma_sync(sg_page(sg), sg->offset, sg->length,
-				   direction);
-		plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction);
-	}
-}
-
-static void mips_dma_sync_single_for_cpu(struct device *dev,
-	dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
-{
-	if (cpu_needs_post_dma_flush(dev))
-		__dma_sync(dma_addr_to_page(dev, dma_handle),
-			   dma_handle & ~PAGE_MASK, size, direction);
-}
-
-static void mips_dma_sync_single_for_device(struct device *dev,
-	dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
-{
-	if (!plat_device_is_coherent(dev))
-		__dma_sync(dma_addr_to_page(dev, dma_handle),
-			   dma_handle & ~PAGE_MASK, size, direction);
-}
-
-static void mips_dma_sync_sg_for_cpu(struct device *dev,
-	struct scatterlist *sg, int nelems, enum dma_data_direction direction)
-{
-	int i;
-
-	/* Make sure that gcc doesn't leave the empty loop body. */
-	for (i = 0; i < nelems; i++, sg++) {
-		if (cpu_needs_post_dma_flush(dev))
-			__dma_sync(sg_page(sg), sg->offset, sg->length,
-				   direction);
-	}
-}
-
-static void mips_dma_sync_sg_for_device(struct device *dev,
-	struct scatterlist *sg, int nelems, enum dma_data_direction direction)
-{
-	int i;
-
-	/* Make sure that gcc doesn't leave the empty loop body. */
-	for (i = 0; i < nelems; i++, sg++) {
-		if (!plat_device_is_coherent(dev))
-			__dma_sync(sg_page(sg), sg->offset, sg->length,
-				   direction);
-	}
-}
-
-int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-	return 0;
-}
-
-int mips_dma_supported(struct device *dev, u64 mask)
-{
-	return plat_dma_supported(dev, mask);
-}
+EXPORT_SYMBOL(__dma_sync);
 
 void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	enum dma_data_direction direction)
@@ -347,23 +242,10 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 }
 EXPORT_SYMBOL(dma_cache_sync);
 
-static struct dma_map_ops mips_default_dma_map_ops = {
-	.alloc = mips_dma_alloc_coherent,
-	.free = mips_dma_free_coherent,
-	.map_page = mips_dma_map_page,
-	.unmap_page = mips_dma_unmap_page,
-	.map_sg = mips_dma_map_sg,
-	.unmap_sg = mips_dma_unmap_sg,
-	.sync_single_for_cpu = mips_dma_sync_single_for_cpu,
-	.sync_single_for_device = mips_dma_sync_single_for_device,
-	.sync_sg_for_cpu = mips_dma_sync_sg_for_cpu,
-	.sync_sg_for_device = mips_dma_sync_sg_for_device,
-	.mapping_error = mips_dma_mapping_error,
-	.dma_supported = mips_dma_supported
-};
-
-struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops;
+#ifdef CONFIG_SYS_HAS_DMA_OPS
+struct dma_map_ops *mips_dma_map_ops = NULL;
 EXPORT_SYMBOL(mips_dma_map_ops);
+#endif
 
 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)