From 776726ff626249276936a7e1f865103ea4e1b7e9 Mon Sep 17 00:00:00 2001
From: John Crispin <blogic@openwrt.org>
Date: Tue, 3 Dec 2013 17:05:05 +0100
Subject: [PATCH] DMA: add rt2880 dma engine

Signed-off-by: John Crispin <blogic@openwrt.org>
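---

Note: the driver registers a one-cell of_dma translator, so the devicetree
cell value selects the GDMA channel id directly. A minimal, hypothetical
consumer node could look like the sketch below (node name, register window
and interrupt number are illustrative assumptions, not part of this patch):

	gdma: dma-controller@2800 {
		compatible = "ralink,rt2880-gdma";
		reg = <0x2800 0x800>;
		interrupts = <7>;
		#dma-cells = <1>;
	};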
 drivers/dma/Kconfig       |   6 +
 drivers/dma/Makefile      |   1 +
 drivers/dma/ralink-gdma.c | 596 +++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 603 insertions(+)
 create mode 100644 drivers/dma/ralink-gdma.c
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -312,6 +312,12 @@ config MMP_PDMA
 	  Support the MMP PDMA engine for PXA and MMP platform.
 
+config DMA_RALINK
+	tristate "RALINK DMA support"
+	depends on RALINK && SOC_MT7620
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
--- /dev/null
+++ b/drivers/dma/ralink-gdma.c
@@ -0,0 +1,596 @@
+/*
+ * Copyright (C) 2013, Lars-Peter Clausen <lars@metafoo.de>
+ * GDMA4740 DMAC support
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/irq.h>
+#include <linux/of_dma.h>
+
+#include "virt-dma.h"
+
+#define GDMA_NR_CHANS			16
+
+#define GDMA_REG_SRC_ADDR(x)		(0x00 + (x) * 0x10)
+#define GDMA_REG_DST_ADDR(x)		(0x04 + (x) * 0x10)
+
+#define GDMA_REG_CTRL0(x)		(0x08 + (x) * 0x10)
+#define GDMA_REG_CTRL0_TX_MASK		0xffff
+#define GDMA_REG_CTRL0_TX_SHIFT		16
+#define GDMA_REG_CTRL0_CURR_MASK	0xff
+#define GDMA_REG_CTRL0_CURR_SHIFT	8
+#define GDMA_REG_CTRL0_SRC_ADDR_FIXED	BIT(7)
+#define GDMA_REG_CTRL0_DST_ADDR_FIXED	BIT(6)
+#define GDMA_REG_CTRL0_BURST_MASK	0x7
+#define GDMA_REG_CTRL0_BURST_SHIFT	3
+#define GDMA_REG_CTRL0_DONE_INT		BIT(2)
+#define GDMA_REG_CTRL0_ENABLE		BIT(1)
+#define GDMA_REG_CTRL0_HW_MODE		0
+
+#define GDMA_REG_CTRL1(x)		(0x0c + (x) * 0x10)
+#define GDMA_REG_CTRL1_SEG_MASK		0xf
+#define GDMA_REG_CTRL1_SEG_SHIFT	22
+#define GDMA_REG_CTRL1_REQ_MASK		0x3f
+#define GDMA_REG_CTRL1_SRC_REQ_SHIFT	16
+#define GDMA_REG_CTRL1_DST_REQ_SHIFT	8
+#define GDMA_REG_CTRL1_CONTINUOUS	BIT(14)
+#define GDMA_REG_CTRL1_NEXT_MASK	0x1f
+#define GDMA_REG_CTRL1_NEXT_SHIFT	3
+#define GDMA_REG_CTRL1_COHERENT		BIT(2)
+#define GDMA_REG_CTRL1_FAIL		BIT(1)
+#define GDMA_REG_CTRL1_MASK		BIT(0)
+
+#define GDMA_REG_UNMASK_INT		0x200
+#define GDMA_REG_DONE_INT		0x204
+
+#define GDMA_REG_GCT			0x220
+#define GDMA_REG_GCT_CHAN_MASK		0x3
+#define GDMA_REG_GCT_CHAN_SHIFT		3
+#define GDMA_REG_GCT_VER_MASK		0x3
+#define GDMA_REG_GCT_VER_SHIFT		1
+#define GDMA_REG_GCT_ARBIT_RR		BIT(0)
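+
+/*
+ * Layout note (inferred from the defines above, not from a datasheet):
+ * each channel owns a 0x10-byte register window (SRC, DST, CTRL0, CTRL1),
+ * while the interrupt and arbitration registers at 0x200 and above are
+ * global to the engine.
+ */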
+
+enum gdma_dma_transfer_size {
+	GDMA_TRANSFER_SIZE_4BYTE	= 0,
+	GDMA_TRANSFER_SIZE_8BYTE	= 1,
+	GDMA_TRANSFER_SIZE_16BYTE	= 2,
+	GDMA_TRANSFER_SIZE_32BYTE	= 3,
+};
+
+struct gdma_dma_sg {
+	dma_addr_t addr;
+	unsigned int len;
+};
+
+struct gdma_dma_desc {
+	struct virt_dma_desc vdesc;
+
+	enum dma_transfer_direction direction;
+	bool cyclic;
+
+	unsigned int num_sgs;
+	struct gdma_dma_sg sg[];
+};
+
+struct gdma_dmaengine_chan {
+	struct virt_dma_chan vchan;
+	unsigned int id;
+	dma_addr_t fifo_addr;
+	unsigned int transfer_shift;
+
+	struct gdma_dma_desc *desc;
+	unsigned int next_sg;
+};
+
+struct gdma_dma_dev {
+	struct dma_device ddev;
+	void __iomem *base;
+
+	struct gdma_dmaengine_chan chan[GDMA_NR_CHANS];
+};
+
+static struct gdma_dma_dev *gdma_dma_chan_get_dev(
+	struct gdma_dmaengine_chan *chan)
+{
+	return container_of(chan->vchan.chan.device, struct gdma_dma_dev,
+		ddev);
+}
+
+static struct gdma_dmaengine_chan *to_gdma_dma_chan(struct dma_chan *c)
+{
+	return container_of(c, struct gdma_dmaengine_chan, vchan.chan);
+}
+
+static struct gdma_dma_desc *to_gdma_dma_desc(struct virt_dma_desc *vdesc)
+{
+	return container_of(vdesc, struct gdma_dma_desc, vdesc);
+}
+
+static inline uint32_t gdma_dma_read(struct gdma_dma_dev *dma_dev,
+	unsigned int reg)
+{
+	return readl(dma_dev->base + reg);
+}
+
+static inline void gdma_dma_write(struct gdma_dma_dev *dma_dev,
+	unsigned reg, uint32_t val)
+{
+	writel(val, dma_dev->base + reg);
+}
+
+static inline void gdma_dma_write_mask(struct gdma_dma_dev *dma_dev,
+	unsigned int reg, uint32_t val, uint32_t mask)
+{
+	uint32_t tmp;
+
+	tmp = gdma_dma_read(dma_dev, reg);
+	tmp &= ~mask;
+	tmp |= val;
+	gdma_dma_write(dma_dev, reg, tmp);
+}
+
+static struct gdma_dma_desc *gdma_dma_alloc_desc(unsigned int num_sgs)
+{
+	return kzalloc(sizeof(struct gdma_dma_desc) +
+		sizeof(struct gdma_dma_sg) * num_sgs, GFP_ATOMIC);
+}
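+
+/*
+ * Translate a dma_slave_config maxburst (in transfer units) into the
+ * 2-bit CTRL0 burst-size encoding; the cut-off values below are kept
+ * as found in the code.
+ */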
+static enum gdma_dma_transfer_size gdma_dma_maxburst(u32 maxburst)
+{
+	if (maxburst <= 7)
+		return GDMA_TRANSFER_SIZE_4BYTE;
+	else if (maxburst <= 15)
+		return GDMA_TRANSFER_SIZE_8BYTE;
+	else if (maxburst <= 31)
+		return GDMA_TRANSFER_SIZE_16BYTE;
+
+	return GDMA_TRANSFER_SIZE_32BYTE;
+}
+
+static int gdma_dma_slave_config(struct dma_chan *c,
+	const struct dma_slave_config *config)
+{
+	struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
+	struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
+	enum gdma_dma_transfer_size transfer_size;
+	uint32_t flags;
+	uint32_t ctrl0, ctrl1;
+
+	switch (config->direction) {
+	case DMA_MEM_TO_DEV:
+		ctrl1 = 32 << GDMA_REG_CTRL1_SRC_REQ_SHIFT;
+		ctrl1 |= config->slave_id << GDMA_REG_CTRL1_DST_REQ_SHIFT;
+		flags = GDMA_REG_CTRL0_DST_ADDR_FIXED;
+		transfer_size = gdma_dma_maxburst(config->dst_maxburst);
+		chan->fifo_addr = config->dst_addr;
+		break;
+
+	case DMA_DEV_TO_MEM:
+		ctrl1 = config->slave_id << GDMA_REG_CTRL1_SRC_REQ_SHIFT;
+		ctrl1 |= 32 << GDMA_REG_CTRL1_DST_REQ_SHIFT;
+		flags = GDMA_REG_CTRL0_SRC_ADDR_FIXED;
+		transfer_size = gdma_dma_maxburst(config->src_maxburst);
+		chan->fifo_addr = config->src_addr;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	chan->transfer_shift = 1 + transfer_size;
+
+	ctrl0 = flags | GDMA_REG_CTRL0_HW_MODE;
+	ctrl0 |= GDMA_REG_CTRL0_DONE_INT;
+
+	ctrl1 &= ~(GDMA_REG_CTRL1_NEXT_MASK << GDMA_REG_CTRL1_NEXT_SHIFT);
+	ctrl1 |= chan->id << GDMA_REG_CTRL1_NEXT_SHIFT;
+	ctrl1 |= GDMA_REG_CTRL1_FAIL;
+	ctrl1 &= ~GDMA_REG_CTRL1_CONTINUOUS;
+	gdma_dma_write(dma_dev, GDMA_REG_CTRL0(chan->id), ctrl0);
+	gdma_dma_write(dma_dev, GDMA_REG_CTRL1(chan->id), ctrl1);
+
+	return 0;
+}
+
+static int gdma_dma_terminate_all(struct dma_chan *c)
+{
+	struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
+	struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
+	unsigned long flags;
+	LIST_HEAD(head);
+
+	spin_lock_irqsave(&chan->vchan.lock, flags);
+	gdma_dma_write_mask(dma_dev, GDMA_REG_CTRL0(chan->id), 0,
+		GDMA_REG_CTRL0_ENABLE);
+	chan->desc = NULL;
+	vchan_get_all_descriptors(&chan->vchan, &head);
+	spin_unlock_irqrestore(&chan->vchan.lock, flags);
+
+	vchan_dma_desc_free_list(&chan->vchan, &head);
+
+	return 0;
+}
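+
+/*
+ * This kernel generation routes slave configuration and termination
+ * through the single device_control callback below.
+ */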
+static int gdma_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+	unsigned long arg)
+{
+	struct dma_slave_config *config = (struct dma_slave_config *)arg;
+
+	switch (cmd) {
+	case DMA_SLAVE_CONFIG:
+		return gdma_dma_slave_config(chan, config);
+	case DMA_TERMINATE_ALL:
+		return gdma_dma_terminate_all(chan);
+	default:
+		return -ENOSYS;
+	}
+}
+
+static int gdma_dma_start_transfer(struct gdma_dmaengine_chan *chan)
+{
+	struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
+	dma_addr_t src_addr, dst_addr;
+	struct virt_dma_desc *vdesc;
+	struct gdma_dma_sg *sg;
+
+	gdma_dma_write_mask(dma_dev, GDMA_REG_CTRL0(chan->id), 0,
+		GDMA_REG_CTRL0_ENABLE);
+
+	if (!chan->desc) {
+		vdesc = vchan_next_desc(&chan->vchan);
+		if (!vdesc)
+			return 0;
+		chan->desc = to_gdma_dma_desc(vdesc);
+		chan->next_sg = 0;
+	}
+
+	if (chan->next_sg == chan->desc->num_sgs)
+		chan->next_sg = 0;
+
+	sg = &chan->desc->sg[chan->next_sg];
+	chan->next_sg++;
+
+	if (chan->desc->direction == DMA_MEM_TO_DEV) {
+		src_addr = sg->addr;
+		dst_addr = chan->fifo_addr;
+	} else {
+		src_addr = chan->fifo_addr;
+		dst_addr = sg->addr;
+	}
+	gdma_dma_write(dma_dev, GDMA_REG_SRC_ADDR(chan->id), src_addr);
+	gdma_dma_write(dma_dev, GDMA_REG_DST_ADDR(chan->id), dst_addr);
+	gdma_dma_write_mask(dma_dev, GDMA_REG_CTRL0(chan->id),
+		(sg->len << GDMA_REG_CTRL0_TX_SHIFT) | GDMA_REG_CTRL0_ENABLE,
+		GDMA_REG_CTRL0_TX_MASK << GDMA_REG_CTRL0_TX_SHIFT);
+
+	gdma_dma_write_mask(dma_dev, GDMA_REG_CTRL1(chan->id), 0,
+		GDMA_REG_CTRL1_MASK);
+
+	return 0;
+}
+
+static void gdma_dma_chan_irq(struct gdma_dmaengine_chan *chan)
+{
+	spin_lock(&chan->vchan.lock);
+	if (chan->desc && chan->desc->cyclic) {
+		vchan_cyclic_callback(&chan->desc->vdesc);
+	} else if (chan->desc) {
+		if (chan->next_sg == chan->desc->num_sgs) {
+			vchan_cookie_complete(&chan->desc->vdesc);
+			chan->desc = NULL;
+		}
+	}
+	gdma_dma_start_transfer(chan);
+	spin_unlock(&chan->vchan.lock);
+}
+
+static irqreturn_t gdma_dma_irq(int irq, void *devid)
+{
+	struct gdma_dma_dev *dma_dev = devid;
+	uint32_t unmask, done;
+	unsigned int i;
+
+	unmask = gdma_dma_read(dma_dev, GDMA_REG_UNMASK_INT);
+	gdma_dma_write(dma_dev, GDMA_REG_UNMASK_INT, unmask);
+	done = gdma_dma_read(dma_dev, GDMA_REG_DONE_INT);
+
+	for (i = 0; i < GDMA_NR_CHANS; ++i)
+		if (done & BIT(i))
+			gdma_dma_chan_irq(&dma_dev->chan[i]);
+	gdma_dma_write(dma_dev, GDMA_REG_DONE_INT, done);
+
+	return IRQ_HANDLED;
+}
+
+static void gdma_dma_issue_pending(struct dma_chan *c)
+{
+	struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
+	unsigned long flags;
+
+	spin_lock_irqsave(&chan->vchan.lock, flags);
+	if (vchan_issue_pending(&chan->vchan) && !chan->desc)
+		gdma_dma_start_transfer(chan);
+	spin_unlock_irqrestore(&chan->vchan.lock, flags);
+}
+
+static struct dma_async_tx_descriptor *gdma_dma_prep_slave_sg(
+	struct dma_chan *c, struct scatterlist *sgl,
+	unsigned int sg_len, enum dma_transfer_direction direction,
+	unsigned long flags, void *context)
+{
+	struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
+	struct gdma_dma_desc *desc;
+	struct scatterlist *sg;
+	unsigned int i;
+
+	desc = gdma_dma_alloc_desc(sg_len);
+	if (!desc)
+		return NULL;
+
+	for_each_sg(sgl, sg, sg_len, i) {
+		desc->sg[i].addr = sg_dma_address(sg);
+		desc->sg[i].len = sg_dma_len(sg);
+	}
+
+	desc->num_sgs = sg_len;
+	desc->direction = direction;
+	desc->cyclic = false;
+
+	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
+}
+
+static struct dma_async_tx_descriptor *gdma_dma_prep_dma_cyclic(
+	struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
+	size_t period_len, enum dma_transfer_direction direction,
+	unsigned long flags, void *context)
+{
+	struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
+	struct gdma_dma_desc *desc;
+	unsigned int num_periods, i;
+
+	if (buf_len % period_len)
+		return NULL;
+
+	num_periods = buf_len / period_len;
+
+	desc = gdma_dma_alloc_desc(num_periods);
+	if (!desc)
+		return NULL;
+
+	for (i = 0; i < num_periods; i++) {
+		desc->sg[i].addr = buf_addr;
+		desc->sg[i].len = period_len;
+		buf_addr += period_len;
+	}
+
+	desc->num_sgs = num_periods;
+	desc->direction = direction;
+	desc->cyclic = true;
+
+	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
+}
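+
+/*
+ * Residue: the full length of every segment after next_sg, plus the
+ * in-flight segment's hardware CURR count scaled by transfer_shift.
+ */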
+static size_t gdma_dma_desc_residue(struct gdma_dmaengine_chan *chan,
+	struct gdma_dma_desc *desc, unsigned int next_sg)
+{
+	struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
+	unsigned int residue, count;
+	unsigned int i;
+
+	residue = 0;
+
+	for (i = next_sg; i < desc->num_sgs; i++)
+		residue += desc->sg[i].len;
+
+	if (next_sg != 0) {
+		count = gdma_dma_read(dma_dev, GDMA_REG_CTRL0(chan->id));
+		count >>= GDMA_REG_CTRL0_CURR_SHIFT;
+		count &= GDMA_REG_CTRL0_CURR_MASK;
+		residue += count << chan->transfer_shift;
+	}
+
+	return residue;
+}
+
+static enum dma_status gdma_dma_tx_status(struct dma_chan *c,
+	dma_cookie_t cookie, struct dma_tx_state *state)
+{
+	struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
+	struct virt_dma_desc *vdesc;
+	enum dma_status status;
+	unsigned long flags;
+
+	status = dma_cookie_status(c, cookie, state);
+	if (status == DMA_SUCCESS || !state)
+		return status;
+
+	spin_lock_irqsave(&chan->vchan.lock, flags);
+	vdesc = vchan_find_desc(&chan->vchan, cookie);
+	if (chan->desc && cookie == chan->desc->vdesc.tx.cookie) {
+		state->residue = gdma_dma_desc_residue(chan, chan->desc,
+			chan->next_sg);
+	} else if (vdesc) {
+		state->residue = gdma_dma_desc_residue(chan,
+			to_gdma_dma_desc(vdesc), 0);
+	} else {
+		state->residue = 0;
+	}
+	spin_unlock_irqrestore(&chan->vchan.lock, flags);
+
+	return status;
+}
+
+static int gdma_dma_alloc_chan_resources(struct dma_chan *c)
+{
+	return 0;
+}
+
+static void gdma_dma_free_chan_resources(struct dma_chan *c)
+{
+	vchan_free_chan_resources(to_virt_chan(c));
+}
+
+static void gdma_dma_desc_free(struct virt_dma_desc *vdesc)
+{
+	kfree(container_of(vdesc, struct gdma_dma_desc, vdesc));
+}
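+
+/*
+ * of_dma translation: the single devicetree cell (#dma-cells = <1>)
+ * is used directly as the channel index.
+ */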
+static struct dma_chan *
+of_dma_xlate_by_chan_id(struct of_phandle_args *dma_spec,
+	struct of_dma *ofdma)
+{
+	struct gdma_dma_dev *dma_dev = ofdma->of_dma_data;
+	unsigned int request = dma_spec->args[0];
+
+	if (request >= GDMA_NR_CHANS)
+		return NULL;
+
+	return dma_get_slave_channel(&(dma_dev->chan[request].vchan.chan));
+}
+
+static int gdma_dma_probe(struct platform_device *pdev)
+{
+	struct gdma_dmaengine_chan *chan;
+	struct gdma_dma_dev *dma_dev;
+	struct dma_device *dd;
+	unsigned int i;
+	struct resource *res;
+	uint32_t gct;
+	int ret;
+	int irq;
+
+	dma_dev = devm_kzalloc(&pdev->dev, sizeof(*dma_dev), GFP_KERNEL);
+	if (!dma_dev)
+		return -ENOMEM;
+
+	dd = &dma_dev->ddev;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	dma_dev->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(dma_dev->base))
+		return PTR_ERR(dma_dev->base);
+
+	dma_cap_set(DMA_SLAVE, dd->cap_mask);
+	dma_cap_set(DMA_CYCLIC, dd->cap_mask);
+	dd->device_alloc_chan_resources = gdma_dma_alloc_chan_resources;
+	dd->device_free_chan_resources = gdma_dma_free_chan_resources;
+	dd->device_tx_status = gdma_dma_tx_status;
+	dd->device_issue_pending = gdma_dma_issue_pending;
+	dd->device_prep_slave_sg = gdma_dma_prep_slave_sg;
+	dd->device_prep_dma_cyclic = gdma_dma_prep_dma_cyclic;
+	dd->device_control = gdma_dma_control;
+	dd->dev = &pdev->dev;
+	dd->chancnt = GDMA_NR_CHANS;
+	INIT_LIST_HEAD(&dd->channels);
+
+	for (i = 0; i < dd->chancnt; i++) {
+		chan = &dma_dev->chan[i];
+		chan->id = i;
+		chan->vchan.desc_free = gdma_dma_desc_free;
+		vchan_init(&chan->vchan, dd);
+	}
+
+	ret = dma_async_device_register(dd);
+	if (ret)
+		return ret;
+
+	ret = of_dma_controller_register(pdev->dev.of_node,
+		of_dma_xlate_by_chan_id, dma_dev);
+	if (ret)
+		goto err_unregister;
+
+	irq = platform_get_irq(pdev, 0);
+	ret = request_irq(irq, gdma_dma_irq, 0, dev_name(&pdev->dev),
+		dma_dev);
+	if (ret)
+		goto err_unregister;
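+
+	/*
+	 * Start with every channel interrupt masked (UNMASK_INT cleared;
+	 * each transfer unmasks its own channel) and ack stale DONE bits.
+	 */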
+	gdma_dma_write(dma_dev, GDMA_REG_UNMASK_INT, 0);
+	gdma_dma_write(dma_dev, GDMA_REG_DONE_INT, BIT(dd->chancnt) - 1);
+
+	gct = gdma_dma_read(dma_dev, GDMA_REG_GCT);
+	dev_info(&pdev->dev, "revision: %d, channels: %d\n",
+		(gct >> GDMA_REG_GCT_VER_SHIFT) & GDMA_REG_GCT_VER_MASK,
+		8 << ((gct >> GDMA_REG_GCT_CHAN_SHIFT) & GDMA_REG_GCT_CHAN_MASK));
+	platform_set_drvdata(pdev, dma_dev);
+	gdma_dma_write(dma_dev, GDMA_REG_GCT, GDMA_REG_GCT_ARBIT_RR);
+
+	return 0;
+
+err_unregister:
+	dma_async_device_unregister(dd);
+	return ret;
+}
+
+static int gdma_dma_remove(struct platform_device *pdev)
+{
+	struct gdma_dma_dev *dma_dev = platform_get_drvdata(pdev);
+	int irq = platform_get_irq(pdev, 0);
+
+	free_irq(irq, dma_dev);
+	of_dma_controller_free(pdev->dev.of_node);
+	dma_async_device_unregister(&dma_dev->ddev);
+
+	return 0;
+}
+
+static const struct of_device_id gdma_of_match_table[] = {
+	{ .compatible = "ralink,rt2880-gdma" },
+	{ },
+};
+
+static struct platform_driver gdma_dma_driver = {
+	.probe = gdma_dma_probe,
+	.remove = gdma_dma_remove,
+	.driver = {
+		.name = "gdma-rt2880",
+		.owner = THIS_MODULE,
+		.of_match_table = gdma_of_match_table,
+	},
+};
+
+module_platform_driver(gdma_dma_driver);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("GDMA4740 DMA driver");
+MODULE_LICENSE("GPL v2");
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -504,6 +504,32 @@ static struct dma_chan *private_candidate
+/**
+ * dma_get_slave_channel - try to get specific channel exclusively
+ * @chan: target channel
+ */
+struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
+{
+	int err = -EBUSY;
+
+	/* lock against __dma_request_channel */
+	mutex_lock(&dma_list_mutex);
+
+	if (chan->client_count == 0) {
+		err = dma_chan_get(chan);
+		if (err)
+			pr_debug("%s: failed to get %s: (%d)\n",
+				__func__, dma_chan_name(chan), err);
+	} else
+		chan = NULL;
+
+	mutex_unlock(&dma_list_mutex);
+
+	return chan;
+}
+EXPORT_SYMBOL_GPL(dma_get_slave_channel);
+
 /**
  * dma_request_channel - try to allocate an exclusive channel
  * @mask: capabilities that the channel must satisfy
  * @fn: optional callback to disposition available channels
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -999,6 +999,7 @@ static inline void dma_release_channel(struct dma_chan *chan)
 int dma_async_device_register(struct dma_device *device);
 void dma_async_device_unregister(struct dma_device *device);
 void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
+struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
 struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
 struct dma_chan *net_dma_find_channel(void);
 #define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -38,3 +38,4 @@ obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o
 obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
 obj-$(CONFIG_DMA_OMAP) += omap-dma.o
 obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
+obj-$(CONFIG_DMA_RALINK) += ralink-gdma.o