X-Git-Url: https://git.enpas.org/?a=blobdiff_plain;f=target%2Flinux%2Fgeneric%2Fpatches-3.10%2F132-mips_inline_dma_ops.patch;h=e31fc1901fceb8e3130a577a8a7b2e76ceee45bb;hb=0ef8ed4abd5ddef0f2f6600f5c6ea482a75e5cd8;hp=b72bb620fdaadb64347ff5669c3b16a834dbab61;hpb=42482772a14032314d4d03571c4ea4f77e44a2df;p=openwrt.git

diff --git a/target/linux/generic/patches-3.10/132-mips_inline_dma_ops.patch b/target/linux/generic/patches-3.10/132-mips_inline_dma_ops.patch
index b72bb620fd..e31fc1901f 100644
--- a/target/linux/generic/patches-3.10/132-mips_inline_dma_ops.patch
+++ b/target/linux/generic/patches-3.10/132-mips_inline_dma_ops.patch
@@ -19,7 +19,7 @@ Signed-off-by: Felix Fietkau
 --- a/arch/mips/Kconfig
 +++ b/arch/mips/Kconfig
-@@ -1430,6 +1430,7 @@ config CPU_CAVIUM_OCTEON
+@@ -1431,6 +1431,7 @@ config CPU_CAVIUM_OCTEON
 	select LIBFDT
 	select USE_OF
 	select USB_EHCI_BIG_ENDIAN_MMIO
@@ -27,7 +27,7 @@ Signed-off-by: Felix Fietkau
 	help
 	  The Cavium Octeon processor is a highly integrated chip containing
 	  many ethernet hardware widgets for networking tasks. The processor
-@@ -1650,6 +1651,9 @@ config SYS_HAS_CPU_XLR
+@@ -1651,6 +1652,9 @@ config SYS_HAS_CPU_XLR
 config SYS_HAS_CPU_XLP
 	bool
@@ -52,11 +52,10 @@ Signed-off-by: Felix Fietkau
 #include 
 #include 
 #include 
-@@ -10,14 +16,47 @@
-
 #include 
 #endif
+@@ -12,12 +18,47 @@
+
+ extern struct dma_map_ops *mips_dma_map_ops;
 
--extern struct dma_map_ops *mips_dma_map_ops;
+void __dma_sync(struct page *page, unsigned long offset, size_t size,
+	enum dma_data_direction direction);
+void *mips_dma_alloc_coherent(struct device *dev, size_t size,
+	dma_addr_t *dma_handle, gfp_t gfp,
+	struct dma_attrs *attrs);
+void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
+	dma_addr_t dma_handle, struct dma_attrs *attrs);
+
 static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 {
+#ifdef CONFIG_SYS_HAS_DMA_OPS
@@ -101,7 +100,7 @@ Signed-off-by: Felix Fietkau
 }
 
 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
-@@ -30,12 +69,309 @@ static inline bool dma_capable(struct de
+@@ -30,12 +71,309 @@ static inline bool dma_capable(struct de
 
 static inline void dma_mark_clean(void *addr, size_t size) {}
 
@@ -413,7 +412,7 @@ Signed-off-by: Felix Fietkau
 }
 
 static inline int dma_mapping_error(struct device *dev, u64 mask)
-@@ -43,7 +379,9 @@ static inline int dma_mapping_error(stru
+@@ -43,7 +381,9 @@ static inline int dma_mapping_error(stru
 	struct dma_map_ops *ops = get_dma_ops(dev);
 
 	debug_dma_mapping_error(dev, mask);
@@ -424,7 +423,7 @@ Signed-off-by: Felix Fietkau
 }
 
 static inline int
-@@ -69,7 +407,11 @@ static inline void *dma_alloc_attrs(stru
+@@ -69,7 +409,11 @@ static inline void *dma_alloc_attrs(stru
 	void *ret;
 	struct dma_map_ops *ops = get_dma_ops(dev);
 
@@ -437,7 +436,7 @@ Signed-off-by: Felix Fietkau
 
 	debug_dma_alloc_coherent(dev, size, *dma_handle, ret);
 
-@@ -84,7 +426,10 @@ static inline void dma_free_attrs(struct
+@@ -84,7 +428,10 @@ static inline void dma_free_attrs(struct
 {
 	struct dma_map_ops *ops = get_dma_ops(dev);
 
@@ -460,7 +459,7 @@ Signed-off-by: Felix Fietkau
 int hw_coherentio = 0;	/* Actual hardware supported DMA coherency setting. */
 static int __init setcoherentio(char *str)
-@@ -44,26 +44,6 @@ static int __init setnocoherentio(char *
+@@ -44,13 +44,6 @@ static int __init setnocoherentio(char *
  early_param("nocoherentio", setnocoherentio);
  #endif
 
-
--static inline struct page *dma_addr_to_page(struct device *dev,
--	dma_addr_t dma_addr)
--{
--	return pfn_to_page(
--		plat_dma_addr_to_phys(dev, dma_addr) >> PAGE_SHIFT);
--}
--
--/*
-- * Warning on the terminology - Linux calls an uncached area coherent;
-- * MIPS terminology calls memory areas with hardware maintained coherency
-- * coherent.
-- */
--
--static inline int cpu_is_noncoherent_r10000(struct device *dev)
--{
--	return !plat_device_is_coherent(dev) &&
--	       (current_cpu_type() == CPU_R10000 ||
--		current_cpu_type() == CPU_R12000);
--}
--
- static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
- {
- 	gfp_t dma_flag;
-@@ -119,8 +99,9 @@ void *dma_alloc_noncoherent(struct devic
+ /*
+  * The affected CPUs below in 'cpu_needs_post_dma_flush()' can
+  * speculatively fill random cachelines with stale data at any time,
+@@ -123,8 +116,9 @@ void *dma_alloc_noncoherent(struct devic
 }
 EXPORT_SYMBOL(dma_alloc_noncoherent);
 
-static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
-	dma_addr_t * dma_handle, gfp_t gfp, struct dma_attrs *attrs)
+void *mips_dma_alloc_coherent(struct device *dev, size_t size,
+	dma_addr_t * dma_handle, gfp_t gfp,
+	struct dma_attrs *attrs)
 {
 	void *ret;
 
-@@ -144,6 +125,7 @@ static void *mips_dma_alloc_coherent(str
+@@ -148,6 +142,7 @@ static void *mips_dma_alloc_coherent(str
 
 	return ret;
 }
+EXPORT_SYMBOL(mips_dma_alloc_coherent);
 
 void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
 
-@@ -154,8 +136,8 @@ void dma_free_noncoherent(struct device
+@@ -158,8 +153,8 @@ void dma_free_noncoherent(struct device
 }
 EXPORT_SYMBOL(dma_free_noncoherent);
 
-static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
-	dma_addr_t dma_handle, struct dma_attrs *attrs)
+void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
+	dma_addr_t dma_handle, struct dma_attrs *attrs)
 {
 	unsigned long addr = (unsigned long) vaddr;
 	int order = get_order(size);
-@@ -170,6 +152,7 @@ static void mips_dma_free_coherent(struc
+@@ -174,6 +169,7 @@ static void mips_dma_free_coherent(struc
 	free_pages(addr, get_order(size));
 }
+EXPORT_SYMBOL(mips_dma_free_coherent);
 
 static inline void __dma_sync_virtual(void *addr, size_t size,
 	enum dma_data_direction direction)
-@@ -198,8 +181,8 @@ static inline void __dma_sync_virtual(vo
+@@ -202,8 +198,8 @@ static inline void __dma_sync_virtual(vo
  * If highmem is not configured then the bulk of this loop gets
  * optimized out.
  */
-static inline void __dma_sync(struct page *page,
-	unsigned long offset, size_t size, enum dma_data_direction direction)
+void __dma_sync(struct page *page, unsigned long offset, size_t size,
+	enum dma_data_direction direction)
 {
 	size_t left = size;
 
-@@ -228,109 +211,7 @@ static inline void __dma_sync(struct pag
+@@ -233,108 +229,7 @@ static inline void __dma_sync(struct pag
 	} while (left);
 }
 
-static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
-	size_t size, enum dma_data_direction direction, struct dma_attrs *attrs)
-{
-	if (cpu_needs_post_dma_flush(dev))
-		__dma_sync(dma_addr_to_page(dev, dma_addr),
-			   dma_addr & ~PAGE_MASK, size, direction);
-
-static void mips_dma_sync_single_for_cpu(struct device *dev,
-	dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
-{
-	if (cpu_needs_post_dma_flush(dev))
-		__dma_sync(dma_addr_to_page(dev, dma_handle),
-			   dma_handle & ~PAGE_MASK, size, direction);
-}
-
-
-	/* Make sure that gcc doesn't leave the empty loop body.  */
-	for (i = 0; i < nelems; i++, sg++) {
-		if (cpu_needs_post_dma_flush(dev))
-			__dma_sync(sg_page(sg), sg->offset, sg->length,
-				   direction);
-	}
-
 void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	       enum dma_data_direction direction)
-@@ -343,23 +224,10 @@ void dma_cache_sync(struct device *dev,
+@@ -347,23 +242,10 @@ void dma_cache_sync(struct device *dev,
 
 EXPORT_SYMBOL(dma_cache_sync);