brcm2708: update against latest rpi-3.10.y branch
openwrt.git: target/linux/brcm2708/patches-3.10/0006-cma-Add-vc_cma-driver-to-enable-use-of-CMA.patch
1 From ff28398a53a45795382bdb25db71f2a8b3394dab Mon Sep 17 00:00:00 2001
2 From: popcornmix <popcornmix@gmail.com>
3 Date: Wed, 3 Jul 2013 00:31:47 +0100
4 Subject: [PATCH 006/174] cma: Add vc_cma driver to enable use of CMA
5
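The driver lets VideoCore dynamically borrow and return 256K chunks of a
CMA region owned by the ARM. The region is configured with the vc-cma-mem
kernel command line parameter, parsed by early_vc_cma_mem() as
<initial>[/<size>][@<base>]. For example (the values here are purely
illustrative, not recommendations):

    vc-cma-mem=16M/256M@256M

declares a 256M CMA region at physical address 256M and allocates the
first 16M worth of chunks for VideoCore at boot. The driver depends on
CONFIG_CMA and is enabled via CONFIG_BRCM_CHAR_DRIVERS and
CONFIG_BCM_VC_CMA. Current usage is reported through /proc/vc-cma, which
also accepts "alloc", "free", "debug" and "reserve" commands.
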
6 Signed-off-by: popcornmix <popcornmix@gmail.com>
7 ---
8  drivers/char/Kconfig                  |    2 +
9  drivers/char/Makefile                 |    3 +
10  drivers/char/broadcom/Kconfig         |   16 +
11  drivers/char/broadcom/Makefile        |    2 +
12  drivers/char/broadcom/vc_cma/Makefile |   15 +
13  drivers/char/broadcom/vc_cma/vc_cma.c | 1143 +++++++++++++++++++++++++++++++++
14  drivers/misc/Makefile                 |    2 +-
15  include/linux/broadcom/vc_cma.h       |   30 +
16  8 files changed, 1212 insertions(+), 1 deletion(-)
17  create mode 100644 drivers/char/broadcom/Kconfig
18  create mode 100644 drivers/char/broadcom/Makefile
19  create mode 100644 drivers/char/broadcom/vc_cma/Makefile
20  create mode 100644 drivers/char/broadcom/vc_cma/vc_cma.c
21  create mode 100644 include/linux/broadcom/vc_cma.h
22
23 --- a/drivers/char/Kconfig
24 +++ b/drivers/char/Kconfig
25 @@ -586,6 +586,8 @@ config DEVPORT
26  
27  source "drivers/s390/char/Kconfig"
28  
29 +source "drivers/char/broadcom/Kconfig"
30 +
31  config MSM_SMD_PKT
32         bool "Enable device interface for some SMD packet ports"
33         default n
34 --- a/drivers/char/Makefile
35 +++ b/drivers/char/Makefile
36 @@ -62,3 +62,6 @@ obj-$(CONFIG_JS_RTC)          += js-rtc.o
37  js-rtc-y = rtc.o
38  
39  obj-$(CONFIG_TILE_SROM)                += tile-srom.o
40 +
41 +obj-$(CONFIG_BRCM_CHAR_DRIVERS) += broadcom/
42 +
43 --- /dev/null
44 +++ b/drivers/char/broadcom/Kconfig
45 @@ -0,0 +1,16 @@
46 +#
47 +# Broadcom char driver config
48 +#
49 +
50 +menuconfig BRCM_CHAR_DRIVERS
51 +       bool "Broadcom Char Drivers"
52 +       help
53 +         Broadcom's char drivers
54 +
55 +config BCM_VC_CMA
56 +       bool "Videocore CMA"
57 +       depends on CMA && BRCM_CHAR_DRIVERS
58 +       default n
59 +        help
60 +          Helper for videocore CMA access.
61 +
62 --- /dev/null
63 +++ b/drivers/char/broadcom/Makefile
64 @@ -0,0 +1,2 @@
65 +obj-$(CONFIG_BCM_VC_CMA)       += vc_cma/
66 +
67 --- /dev/null
68 +++ b/drivers/char/broadcom/vc_cma/Makefile
69 @@ -0,0 +1,15 @@
70 +EXTRA_CFLAGS  += -Wall -Wstrict-prototypes -Wno-trigraphs
71 +EXTRA_CFLAGS  += -Werror
72 +EXTRA_CFLAGS  += -I"include/linux/broadcom"
73 +EXTRA_CFLAGS  += -I"drivers/misc/vc04_services"
74 +EXTRA_CFLAGS  += -I"drivers/misc/vc04_services/interface/vchi"
75 +EXTRA_CFLAGS  += -I"drivers/misc/vc04_services/interface/vchiq_arm"
76 +
77 +EXTRA_CFLAGS  += -D__KERNEL__
78 +EXTRA_CFLAGS  += -D__linux__
79 +EXTRA_CFLAGS  += -Werror
80 +
81 +obj-$(CONFIG_BCM_VC_CMA) += vc-cma.o
82 +
83 +vc-cma-objs := vc_cma.o
84 +
85 --- /dev/null
86 +++ b/drivers/char/broadcom/vc_cma/vc_cma.c
87 @@ -0,0 +1,1143 @@
88 +/**
89 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
90 + *
91 + * Redistribution and use in source and binary forms, with or without
92 + * modification, are permitted provided that the following conditions
93 + * are met:
94 + * 1. Redistributions of source code must retain the above copyright
95 + *    notice, this list of conditions, and the following disclaimer,
96 + *    without modification.
97 + * 2. Redistributions in binary form must reproduce the above copyright
98 + *    notice, this list of conditions and the following disclaimer in the
99 + *    documentation and/or other materials provided with the distribution.
100 + * 3. The names of the above-listed copyright holders may not be used
101 + *    to endorse or promote products derived from this software without
102 + *    specific prior written permission.
103 + *
104 + * ALTERNATIVELY, this software may be distributed under the terms of the
105 + * GNU General Public License ("GPL") version 2, as published by the Free
106 + * Software Foundation.
107 + *
108 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
109 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
110 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
111 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
112 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
113 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
114 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
115 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
116 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
117 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
118 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
119 + */
120 +
121 +#include <linux/kernel.h>
122 +#include <linux/module.h>
123 +#include <linux/kthread.h>
124 +#include <linux/fs.h>
125 +#include <linux/device.h>
126 +#include <linux/cdev.h>
127 +#include <linux/mm.h>
128 +#include <linux/proc_fs.h>
129 +#include <linux/seq_file.h>
130 +#include <linux/dma-mapping.h>
131 +#include <linux/dma-contiguous.h>
132 +#include <linux/platform_device.h>
133 +#include <linux/uaccess.h>
134 +#include <asm/cacheflush.h>
135 +
136 +#include "vc_cma.h"
137 +
138 +#include "vchiq_util.h"
139 +#include "vchiq_connected.h"
140 +//#include "debug_sym.h"
141 +//#include "vc_mem.h"
142 +
143 +#define DRIVER_NAME  "vc-cma"
144 +
145 +#define LOG_DBG(fmt, ...) \
146 +       if (vc_cma_debug) \
147 +               printk(KERN_INFO fmt "\n", ##__VA_ARGS__)
148 +#define LOG_ERR(fmt, ...) \
149 +       printk(KERN_ERR fmt "\n", ##__VA_ARGS__)
150 +
151 +#define VC_CMA_FOURCC VCHIQ_MAKE_FOURCC('C', 'M', 'A', ' ')
152 +#define VC_CMA_VERSION 2
153 +
154 +#define VC_CMA_CHUNK_ORDER 6   /* 256K */
155 +#define VC_CMA_CHUNK_SIZE (4096 << VC_CMA_CHUNK_ORDER)
156 +#define VC_CMA_MAX_PARAMS_PER_MSG \
157 +       ((VCHIQ_MAX_MSG_SIZE - sizeof(unsigned short))/sizeof(unsigned short))
158 +#define VC_CMA_RESERVE_COUNT_MAX 16
159 +
160 +#define PAGES_PER_CHUNK (VC_CMA_CHUNK_SIZE / PAGE_SIZE)
161 +
162 +#define VCADDR_TO_PHYSADDR(vcaddr) (mm_vc_mem_phys_addr + vcaddr)
163 +
164 +#define loud_error(...) \
165 +       LOG_ERR("===== " __VA_ARGS__)
166 +
167 +enum {
168 +       VC_CMA_MSG_QUIT,
169 +       VC_CMA_MSG_OPEN,
170 +       VC_CMA_MSG_TICK,
171 +       VC_CMA_MSG_ALLOC,       /* chunk count */
172 +       VC_CMA_MSG_FREE,        /* chunk, chunk, ... */
173 +       VC_CMA_MSG_ALLOCATED,   /* chunk, chunk, ... */
174 +       VC_CMA_MSG_REQUEST_ALLOC,       /* chunk count */
175 +       VC_CMA_MSG_REQUEST_FREE,        /* chunk count */
176 +       VC_CMA_MSG_RESERVE,     /* bytes lo, bytes hi */
177 +       VC_CMA_MSG_UPDATE_RESERVE,
178 +       VC_CMA_MSG_MAX
179 +};
180 +
181 +struct cma_msg {
182 +       unsigned short type;
183 +       unsigned short params[VC_CMA_MAX_PARAMS_PER_MSG];
184 +};
185 +
186 +struct vc_cma_reserve_user {
187 +       unsigned int pid;
188 +       unsigned int reserve;
189 +};
190 +
191 +/* Device (/dev) related variables */
192 +static dev_t vc_cma_devnum;
193 +static struct class *vc_cma_class;
194 +static struct cdev vc_cma_cdev;
195 +static int vc_cma_inited;
196 +static int vc_cma_debug;
197 +
198 +/* Proc entry */
199 +static struct proc_dir_entry *vc_cma_proc_entry;
200 +
201 +phys_addr_t vc_cma_base;
202 +struct page *vc_cma_base_page;
203 +unsigned int vc_cma_size;
204 +EXPORT_SYMBOL(vc_cma_size);
205 +unsigned int vc_cma_initial;
206 +unsigned int vc_cma_chunks;
207 +unsigned int vc_cma_chunks_used;
208 +unsigned int vc_cma_chunks_reserved;
209 +
210 +static int in_loud_error;
211 +
212 +unsigned int vc_cma_reserve_total;
213 +unsigned int vc_cma_reserve_count;
214 +struct vc_cma_reserve_user vc_cma_reserve_users[VC_CMA_RESERVE_COUNT_MAX];
215 +static DEFINE_SEMAPHORE(vc_cma_reserve_mutex);
216 +static DEFINE_SEMAPHORE(vc_cma_worker_queue_push_mutex);
217 +
218 +static u64 vc_cma_dma_mask = DMA_BIT_MASK(32);
219 +static struct platform_device vc_cma_device = {
220 +       .name = "vc-cma",
221 +       .id = 0,
222 +       .dev = {
223 +               .dma_mask = &vc_cma_dma_mask,
224 +               .coherent_dma_mask = DMA_BIT_MASK(32),
225 +               },
226 +};
227 +
228 +static VCHIQ_INSTANCE_T cma_instance;
229 +static VCHIQ_SERVICE_HANDLE_T cma_service;
230 +static VCHIU_QUEUE_T cma_msg_queue;
231 +static struct task_struct *cma_worker;
232 +
233 +static int vc_cma_set_reserve(unsigned int reserve, unsigned int pid);
234 +static int vc_cma_alloc_chunks(int num_chunks, struct cma_msg *reply);
235 +static VCHIQ_STATUS_T cma_service_callback(VCHIQ_REASON_T reason,
236 +                                          VCHIQ_HEADER_T * header,
237 +                                          VCHIQ_SERVICE_HANDLE_T service,
238 +                                          void *bulk_userdata);
239 +static void send_vc_msg(unsigned short type,
240 +                       unsigned short param1, unsigned short param2);
241 +static bool send_worker_msg(VCHIQ_HEADER_T * msg);
242 +
243 +static int early_vc_cma_mem(char *p)
244 +{
245 +       unsigned int new_size;
246 +       printk(KERN_NOTICE "early_vc_cma_mem(%s)", p);
247 +       vc_cma_size = memparse(p, &p);
248 +       vc_cma_initial = vc_cma_size;
249 +       if (*p == '/')
250 +               vc_cma_size = memparse(p + 1, &p);
251 +       if (*p == '@')
252 +               vc_cma_base = memparse(p + 1, &p);
253 +
254 +       new_size = (vc_cma_size - ((-vc_cma_base) & (VC_CMA_CHUNK_SIZE - 1)))
255 +           & ~(VC_CMA_CHUNK_SIZE - 1);
256 +       if (new_size > vc_cma_size)
257 +               vc_cma_size = 0;
258 +       vc_cma_initial = (vc_cma_initial + VC_CMA_CHUNK_SIZE - 1)
259 +           & ~(VC_CMA_CHUNK_SIZE - 1);
260 +       if (vc_cma_initial > vc_cma_size)
261 +               vc_cma_initial = vc_cma_size;
262 +       vc_cma_base = (vc_cma_base + VC_CMA_CHUNK_SIZE - 1)
263 +           & ~(VC_CMA_CHUNK_SIZE - 1);
264 +
265 +       printk(KERN_NOTICE " -> initial %x, size %x, base %x", vc_cma_initial,
266 +              vc_cma_size, (unsigned int)vc_cma_base);
267 +
268 +       return 0;
269 +}
270 +
271 +early_param("vc-cma-mem", early_vc_cma_mem);
272 +
273 +void vc_cma_early_init(void)
274 +{
275 +       LOG_DBG("vc_cma_early_init - vc_cma_chunks = %d", vc_cma_chunks);
276 +       if (vc_cma_size) {
277 +               int rc = platform_device_register(&vc_cma_device);
278 +               LOG_DBG("platform_device_register -> %d", rc);
279 +       }
280 +}
281 +
282 +void vc_cma_reserve(void)
283 +{
284 +       /* if vc_cma_size is set, then declare vc CMA area of the same
285 +        * size from the end of memory
286 +        */
287 +       if (vc_cma_size) {
288 +               if (dma_declare_contiguous(NULL /*&vc_cma_device.dev*/, vc_cma_size,
289 +                                          vc_cma_base, 0) == 0) {
290 +               } else {
291 +                       LOG_ERR("vc_cma: dma_declare_contiguous(%x,%x) failed",
292 +                               vc_cma_size, (unsigned int)vc_cma_base);
293 +                       vc_cma_size = 0;
294 +               }
295 +       }
296 +       vc_cma_chunks = vc_cma_size / VC_CMA_CHUNK_SIZE;
297 +}
298 +
299 +/****************************************************************************
300 +*
301 +*   vc_cma_open
302 +*
303 +***************************************************************************/
304 +
305 +static int vc_cma_open(struct inode *inode, struct file *file)
306 +{
307 +       (void)inode;
308 +       (void)file;
309 +
310 +       return 0;
311 +}
312 +
313 +/****************************************************************************
314 +*
315 +*   vc_cma_release
316 +*
317 +***************************************************************************/
318 +
319 +static int vc_cma_release(struct inode *inode, struct file *file)
320 +{
321 +       (void)inode;
322 +       (void)file;
323 +
324 +       vc_cma_set_reserve(0, current->tgid);
325 +
326 +       return 0;
327 +}
328 +
329 +/****************************************************************************
330 +*
331 +*   vc_cma_ioctl
332 +*
333 +***************************************************************************/
334 +
335 +static long vc_cma_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
336 +{
337 +       int rc = 0;
338 +
339 +       (void)cmd;
340 +       (void)arg;
341 +
342 +       switch (cmd) {
343 +       case VC_CMA_IOC_RESERVE:
344 +               rc = vc_cma_set_reserve((unsigned int)arg, current->tgid);
345 +               if (rc >= 0)
346 +                       rc = 0;
347 +               break;
348 +       default:
349 +               LOG_ERR("vc-cma: Unknown ioctl %x", cmd);
350 +               return -ENOTTY;
351 +       }
352 +
353 +       return rc;
354 +}
355 +
356 +/****************************************************************************
357 +*
358 +*   File Operations for the driver.
359 +*
360 +***************************************************************************/
361 +
362 +static const struct file_operations vc_cma_fops = {
363 +       .owner = THIS_MODULE,
364 +       .open = vc_cma_open,
365 +       .release = vc_cma_release,
366 +       .unlocked_ioctl = vc_cma_ioctl,
367 +};
368 +
369 +/****************************************************************************
370 +*
371 +*   vc_cma_proc_open
372 +*
373 +***************************************************************************/
374 +
375 +static int vc_cma_show_info(struct seq_file *m, void *v)
376 +{
377 +       int i;
378 +
379 +       seq_printf(m, "Videocore CMA:\n");
380 +       seq_printf(m, "   Base       : %08x\n", (unsigned int)vc_cma_base);
381 +       seq_printf(m, "   Length     : %08x\n", vc_cma_size);
382 +       seq_printf(m, "   Initial    : %08x\n", vc_cma_initial);
383 +       seq_printf(m, "   Chunk size : %08x\n", VC_CMA_CHUNK_SIZE);
384 +       seq_printf(m, "   Chunks     : %4d (%d bytes)\n",
385 +                  (int)vc_cma_chunks,
386 +                  (int)(vc_cma_chunks * VC_CMA_CHUNK_SIZE));
387 +       seq_printf(m, "   Used       : %4d (%d bytes)\n",
388 +                  (int)vc_cma_chunks_used,
389 +                  (int)(vc_cma_chunks_used * VC_CMA_CHUNK_SIZE));
390 +       seq_printf(m, "   Reserved   : %4d (%d bytes)\n",
391 +                  (unsigned int)vc_cma_chunks_reserved,
392 +                  (int)(vc_cma_chunks_reserved * VC_CMA_CHUNK_SIZE));
393 +
394 +       for (i = 0; i < vc_cma_reserve_count; i++) {
395 +               struct vc_cma_reserve_user *user = &vc_cma_reserve_users[i];
396 +               seq_printf(m, "     PID %5d: %d bytes\n", user->pid,
397 +                          user->reserve);
398 +       }
399 +
400 +       seq_printf(m, "\n");
401 +
402 +       return 0;
403 +}
404 +
405 +static int vc_cma_proc_open(struct inode *inode, struct file *file)
406 +{
407 +       return single_open(file, vc_cma_show_info, NULL);
408 +}
409 +
410 +/****************************************************************************
411 +*
412 +*   vc_cma_proc_write
413 +*
414 +***************************************************************************/
415 +
416 +static int vc_cma_proc_write(struct file *file,
417 +                            const char __user *buffer,
418 +                            size_t size, loff_t *ppos)
419 +{
420 +       int rc = -EFAULT;
421 +       char input_str[20];
422 +
423 +       memset(input_str, 0, sizeof(input_str));
424 +
425 +       if (size > sizeof(input_str)) {
426 +               LOG_ERR("%s: input string length too long", __func__);
427 +               goto out;
428 +       }
429 +
430 +       if (copy_from_user(input_str, buffer, size - 1)) {
431 +               LOG_ERR("%s: failed to get input string", __func__);
432 +               goto out;
433 +       }
434 +#define ALLOC_STR "alloc"
435 +#define FREE_STR "free"
436 +#define DEBUG_STR "debug"
437 +#define RESERVE_STR "reserve"
438 +       if (strncmp(input_str, ALLOC_STR, strlen(ALLOC_STR)) == 0) {
439 +               int size;
440 +               char *p = input_str + strlen(ALLOC_STR);
441 +
442 +               while (*p == ' ')
443 +                       p++;
444 +               size = memparse(p, NULL);
445 +               LOG_ERR("/proc/vc-cma: alloc %d", size);
446 +               if (size)
447 +                       send_vc_msg(VC_CMA_MSG_REQUEST_FREE,
448 +                                   size / VC_CMA_CHUNK_SIZE, 0);
449 +               else
450 +                       LOG_ERR("invalid size '%s'", p);
451 +               rc = size;
452 +       } else if (strncmp(input_str, FREE_STR, strlen(FREE_STR)) == 0) {
453 +               int size;
454 +               char *p = input_str + strlen(FREE_STR);
455 +
456 +               while (*p == ' ')
457 +                       p++;
458 +               size = memparse(p, NULL);
459 +               LOG_ERR("/proc/vc-cma: free %d", size);
460 +               if (size)
461 +                       send_vc_msg(VC_CMA_MSG_REQUEST_ALLOC,
462 +                                   size / VC_CMA_CHUNK_SIZE, 0);
463 +               else
464 +                       LOG_ERR("invalid size '%s'", p);
465 +               rc = size;
466 +       } else if (strncmp(input_str, DEBUG_STR, strlen(DEBUG_STR)) == 0) {
467 +               char *p = input_str + strlen(DEBUG_STR);
468 +               while (*p == ' ')
469 +                       p++;
470 +               if ((strcmp(p, "on") == 0) || (strcmp(p, "1") == 0))
471 +                       vc_cma_debug = 1;
472 +               else if ((strcmp(p, "off") == 0) || (strcmp(p, "0") == 0))
473 +                       vc_cma_debug = 0;
474 +               LOG_ERR("/proc/vc-cma: debug %s", vc_cma_debug ? "on" : "off");
475 +               rc = size;
476 +       } else if (strncmp(input_str, RESERVE_STR, strlen(RESERVE_STR)) == 0) {
477 +               int size;
478 +               int reserved;
479 +               char *p = input_str + strlen(RESERVE_STR);
480 +               while (*p == ' ')
481 +                       p++;
482 +               size = memparse(p, NULL);
483 +
484 +               reserved = vc_cma_set_reserve(size, current->tgid);
485 +               rc = (reserved >= 0) ? size : reserved;
486 +       }
487 +
488 +out:
489 +       return rc;
490 +}
491 +
492 +/****************************************************************************
493 +*
494 +*   File Operations for /proc interface.
495 +*
496 +***************************************************************************/
497 +
498 +static const struct file_operations vc_cma_proc_fops = {
499 +       .open = vc_cma_proc_open,
500 +       .read = seq_read,
501 +       .write = vc_cma_proc_write,
502 +       .llseek = seq_lseek,
503 +       .release = single_release
504 +};
505 +
506 +static int vc_cma_set_reserve(unsigned int reserve, unsigned int pid)
507 +{
508 +       struct vc_cma_reserve_user *user = NULL;
509 +       int delta = 0;
510 +       int i;
511 +
512 +       if (down_interruptible(&vc_cma_reserve_mutex))
513 +               return -ERESTARTSYS;
514 +
515 +       for (i = 0; i < vc_cma_reserve_count; i++) {
516 +               if (pid == vc_cma_reserve_users[i].pid) {
517 +                       user = &vc_cma_reserve_users[i];
518 +                       delta = reserve - user->reserve;
519 +                       if (reserve)
520 +                               user->reserve = reserve;
521 +                       else {
522 +                               /* Remove this entry by copying downwards */
523 +                               while ((i + 1) < vc_cma_reserve_count) {
524 +                                       user[0].pid = user[1].pid;
525 +                                       user[0].reserve = user[1].reserve;
526 +                                       user++;
527 +                                       i++;
528 +                               }
529 +                               vc_cma_reserve_count--;
530 +                               user = NULL;
531 +                       }
532 +                       break;
533 +               }
534 +       }
535 +
536 +       if (reserve && !user) {
537 +               if (vc_cma_reserve_count == VC_CMA_RESERVE_COUNT_MAX) {
538 +                       LOG_ERR("vc-cma: Too many reservations - "
539 +                               "increase CMA_RESERVE_COUNT_MAX");
540 +                       up(&vc_cma_reserve_mutex);
541 +                       return -EBUSY;
542 +               }
543 +               user = &vc_cma_reserve_users[vc_cma_reserve_count];
544 +               user->pid = pid;
545 +               user->reserve = reserve;
546 +               delta = reserve;
547 +               vc_cma_reserve_count++;
548 +       }
549 +
550 +       vc_cma_reserve_total += delta;
551 +
552 +       send_vc_msg(VC_CMA_MSG_RESERVE,
553 +                   vc_cma_reserve_total & 0xffff, vc_cma_reserve_total >> 16);
554 +
555 +       send_worker_msg((VCHIQ_HEADER_T *) VC_CMA_MSG_UPDATE_RESERVE);
556 +
557 +       LOG_DBG("/proc/vc-cma: reserve %d (PID %d) - total %u",
558 +               reserve, pid, vc_cma_reserve_total);
559 +
560 +       up(&vc_cma_reserve_mutex);
561 +
562 +       return vc_cma_reserve_total;
563 +}
564 +
565 +static VCHIQ_STATUS_T cma_service_callback(VCHIQ_REASON_T reason,
566 +                                          VCHIQ_HEADER_T * header,
567 +                                          VCHIQ_SERVICE_HANDLE_T service,
568 +                                          void *bulk_userdata)
569 +{
570 +       switch (reason) {
571 +       case VCHIQ_MESSAGE_AVAILABLE:
572 +               if (!send_worker_msg(header))
573 +                       return VCHIQ_RETRY;
574 +               break;
575 +       case VCHIQ_SERVICE_CLOSED:
576 +               LOG_DBG("CMA service closed");
577 +               break;
578 +       default:
579 +               LOG_ERR("Unexpected CMA callback reason %d", reason);
580 +               break;
581 +       }
582 +       return VCHIQ_SUCCESS;
583 +}
584 +
585 +static void send_vc_msg(unsigned short type,
586 +                       unsigned short param1, unsigned short param2)
587 +{
588 +       unsigned short msg[] = { type, param1, param2 };
589 +       VCHIQ_ELEMENT_T elem = { &msg, sizeof(msg) };
590 +       VCHIQ_STATUS_T ret;
591 +       vchiq_use_service(cma_service);
592 +       ret = vchiq_queue_message(cma_service, &elem, 1);
593 +       vchiq_release_service(cma_service);
594 +       if (ret != VCHIQ_SUCCESS)
595 +               LOG_ERR("vchiq_queue_message returned %x", ret);
596 +}
597 +
598 +static bool send_worker_msg(VCHIQ_HEADER_T * msg)
599 +{
600 +       if (down_interruptible(&vc_cma_worker_queue_push_mutex))
601 +               return false;
602 +       vchiu_queue_push(&cma_msg_queue, msg);
603 +       up(&vc_cma_worker_queue_push_mutex);
604 +       return true;
605 +}
606 +
607 +static int vc_cma_alloc_chunks(int num_chunks, struct cma_msg *reply)
608 +{
609 +       int i;
610 +       for (i = 0; i < num_chunks; i++) {
611 +               struct page *chunk;
612 +               unsigned int chunk_num;
613 +               uint8_t *chunk_addr;
614 +               size_t chunk_size = PAGES_PER_CHUNK << PAGE_SHIFT;
615 +
616 +               chunk = dma_alloc_from_contiguous(NULL /*&vc_cma_device.dev*/,
617 +                                                 PAGES_PER_CHUNK,
618 +                                                 VC_CMA_CHUNK_ORDER);
619 +               if (!chunk)
620 +                       break;
621 +
622 +               chunk_addr = page_address(chunk);
623 +               dmac_flush_range(chunk_addr, chunk_addr + chunk_size);
624 +               outer_inv_range(__pa(chunk_addr), __pa(chunk_addr) +
625 +                       chunk_size);
626 +
627 +               chunk_num =
628 +                   (page_to_phys(chunk) - vc_cma_base) / VC_CMA_CHUNK_SIZE;
629 +               BUG_ON(((page_to_phys(chunk) - vc_cma_base) %
630 +                       VC_CMA_CHUNK_SIZE) != 0);
631 +               if (chunk_num >= vc_cma_chunks) {
632 +                       LOG_ERR("%s: ===============================",
633 +                               __func__);
634 +                       LOG_ERR("%s: chunk phys %x, vc_cma %x-%x - "
635 +                               "bad SPARSEMEM configuration?",
636 +                               __func__, (unsigned int)page_to_phys(chunk),
637 +                               vc_cma_base, vc_cma_base + vc_cma_size - 1);
638 +                       LOG_ERR("%s: dev->cma_area = %p\n", __func__,
639 +                               vc_cma_device.dev.cma_area);
640 +                       LOG_ERR("%s: ===============================",
641 +                               __func__);
642 +                       break;
643 +               }
644 +               reply->params[i] = chunk_num;
645 +               vc_cma_chunks_used++;
646 +       }
647 +
648 +       if (i < num_chunks) {
649 +               LOG_ERR("%s: dma_alloc_from_contiguous failed "
650 +                       "for %x bytes (alloc %d of %d, %d free)",
651 +                       __func__, VC_CMA_CHUNK_SIZE, i,
652 +                       num_chunks, vc_cma_chunks - vc_cma_chunks_used);
653 +               num_chunks = i;
654 +       }
655 +
656 +       LOG_DBG("CMA allocated %d chunks -> %d used",
657 +               num_chunks, vc_cma_chunks_used);
658 +       reply->type = VC_CMA_MSG_ALLOCATED;
659 +
660 +       {
661 +               VCHIQ_ELEMENT_T elem = {
662 +                       reply,
663 +                       offsetof(struct cma_msg, params[0]) +
664 +                           num_chunks * sizeof(reply->params[0])
665 +               };
666 +               VCHIQ_STATUS_T ret;
667 +               vchiq_use_service(cma_service);
668 +               ret = vchiq_queue_message(cma_service, &elem, 1);
669 +               vchiq_release_service(cma_service);
670 +               if (ret != VCHIQ_SUCCESS)
671 +                       LOG_ERR("vchiq_queue_message return " "%x", ret);
672 +       }
673 +
674 +       return num_chunks;
675 +}
676 +
677 +static int cma_worker_proc(void *param)
678 +{
679 +       static struct cma_msg reply;
680 +       (void)param;
681 +
682 +       while (1) {
683 +               VCHIQ_HEADER_T *msg;
684 +               static struct cma_msg msg_copy;
685 +               struct cma_msg *cma_msg = &msg_copy;
686 +               int type, msg_size;
687 +
688 +               msg = vchiu_queue_pop(&cma_msg_queue);
689 +               if ((unsigned int)msg >= VC_CMA_MSG_MAX) {
690 +                       msg_size = msg->size;
691 +                       memcpy(&msg_copy, msg->data, msg_size);
692 +                       type = cma_msg->type;
693 +                       vchiq_release_message(cma_service, msg);
694 +               } else {
695 +                       msg_size = 0;
696 +                       type = (int)msg;
697 +                       if (type == VC_CMA_MSG_QUIT)
698 +                               break;
699 +                       else if (type == VC_CMA_MSG_UPDATE_RESERVE) {
700 +                               msg = NULL;
701 +                               cma_msg = NULL;
702 +                       } else {
703 +                               BUG();
704 +                               continue;
705 +                       }
706 +               }
707 +
708 +               switch (type) {
709 +               case VC_CMA_MSG_ALLOC:{
710 +                               int num_chunks, free_chunks;
711 +                               num_chunks = cma_msg->params[0];
712 +                               free_chunks =
713 +                                   vc_cma_chunks - vc_cma_chunks_used;
714 +                               LOG_DBG("CMA_MSG_ALLOC(%d chunks)", num_chunks);
715 +                               if (num_chunks > VC_CMA_MAX_PARAMS_PER_MSG) {
716 +                                       LOG_ERR
717 +                                           ("CMA_MSG_ALLOC - chunk count (%d) "
718 +                                            "exceeds VC_CMA_MAX_PARAMS_PER_MSG (%d)",
719 +                                            num_chunks,
720 +                                            VC_CMA_MAX_PARAMS_PER_MSG);
721 +                                       num_chunks = VC_CMA_MAX_PARAMS_PER_MSG;
722 +                               }
723 +
724 +                               if (num_chunks > free_chunks) {
725 +                                       LOG_ERR
726 +                                           ("CMA_MSG_ALLOC - chunk count (%d) "
727 +                                            "exceeds free chunks (%d)",
728 +                                            num_chunks, free_chunks);
729 +                                       num_chunks = free_chunks;
730 +                               }
731 +
732 +                               vc_cma_alloc_chunks(num_chunks, &reply);
733 +                       }
734 +                       break;
735 +
736 +               case VC_CMA_MSG_FREE:{
737 +                               int chunk_count =
738 +                                   (msg_size -
739 +                                    offsetof(struct cma_msg,
740 +                                             params)) /
741 +                                   sizeof(cma_msg->params[0]);
742 +                               int i;
743 +                               BUG_ON(chunk_count <= 0);
744 +
745 +                               LOG_DBG("CMA_MSG_FREE(%d chunks - %x, ...)",
746 +                                       chunk_count, cma_msg->params[0]);
747 +                               for (i = 0; i < chunk_count; i++) {
748 +                                       int chunk_num = cma_msg->params[i];
749 +                                       struct page *page = vc_cma_base_page +
750 +                                           chunk_num * PAGES_PER_CHUNK;
751 +                                       if (chunk_num >= vc_cma_chunks) {
752 +                                               LOG_ERR
753 +                                                   ("CMA_MSG_FREE - chunk %d of %d"
754 +                                                    " (value %x) exceeds maximum "
755 +                                                    "(%x)", i, chunk_count,
756 +                                                    chunk_num,
757 +                                                    vc_cma_chunks - 1);
758 +                                               break;
759 +                                       }
760 +
761 +                                       if (!dma_release_from_contiguous
762 +                                           (NULL /*&vc_cma_device.dev*/, page,
763 +                                            PAGES_PER_CHUNK)) {
764 +                                               LOG_ERR
765 +                                                   ("CMA_MSG_FREE - failed to "
766 +                                                    "release chunk %d (phys %x, "
767 +                                                    "page %x)", chunk_num,
768 +                                                    page_to_phys(page),
769 +                                                    (unsigned int)page);
770 +                                       }
771 +                                       vc_cma_chunks_used--;
772 +                               }
773 +                               LOG_DBG("CMA released %d chunks -> %d used",
774 +                                       i, vc_cma_chunks_used);
775 +                       }
776 +                       break;
777 +
778 +               case VC_CMA_MSG_UPDATE_RESERVE:{
779 +                               int chunks_needed =
780 +                                   ((vc_cma_reserve_total + VC_CMA_CHUNK_SIZE -
781 +                                     1)
782 +                                    / VC_CMA_CHUNK_SIZE) -
783 +                                   vc_cma_chunks_reserved;
784 +
785 +                               LOG_DBG
786 +                                   ("CMA_MSG_UPDATE_RESERVE(%d chunks needed)",
787 +                                    chunks_needed);
788 +
789 +                               /* Cap the reservations to what is available */
790 +                               if (chunks_needed > 0) {
791 +                                       if (chunks_needed >
792 +                                           (vc_cma_chunks -
793 +                                            vc_cma_chunks_used))
794 +                                               chunks_needed =
795 +                                                   (vc_cma_chunks -
796 +                                                    vc_cma_chunks_used);
797 +
798 +                                       chunks_needed =
799 +                                           vc_cma_alloc_chunks(chunks_needed,
800 +                                                               &reply);
801 +                               }
802 +
803 +                               LOG_DBG
804 +                                   ("CMA_MSG_UPDATE_RESERVE(%d chunks allocated)",
805 +                                    chunks_needed);
806 +                               vc_cma_chunks_reserved += chunks_needed;
807 +                       }
808 +                       break;
809 +
810 +               default:
811 +                       LOG_ERR("unexpected msg type %d", type);
812 +                       break;
813 +               }
814 +       }
815 +
816 +       LOG_DBG("quitting...");
817 +       return 0;
818 +}
819 +
820 +/****************************************************************************
821 +*
822 +*   vc_cma_connected_init
823 +*
824 +*   This function is called once the videocore has been connected.
825 +*
826 +***************************************************************************/
827 +
828 +static void vc_cma_connected_init(void)
829 +{
830 +       VCHIQ_SERVICE_PARAMS_T service_params;
831 +
832 +       LOG_DBG("vc_cma_connected_init");
833 +
834 +       if (!vchiu_queue_init(&cma_msg_queue, 16)) {
835 +               LOG_ERR("could not create CMA msg queue");
836 +               goto fail_queue;
837 +       }
838 +
839 +       if (vchiq_initialise(&cma_instance) != VCHIQ_SUCCESS)
840 +               goto fail_vchiq_init;
841 +
842 +       vchiq_connect(cma_instance);
843 +
844 +       service_params.fourcc = VC_CMA_FOURCC;
845 +       service_params.callback = cma_service_callback;
846 +       service_params.userdata = NULL;
847 +       service_params.version = VC_CMA_VERSION;
848 +       service_params.version_min = VC_CMA_VERSION;
849 +
850 +       if (vchiq_open_service(cma_instance, &service_params,
851 +                              &cma_service) != VCHIQ_SUCCESS) {
852 +               LOG_ERR("failed to open service - already in use?");
853 +               goto fail_vchiq_open;
854 +       }
855 +
856 +       vchiq_release_service(cma_service);
857 +
858 +       cma_worker = kthread_create(cma_worker_proc, NULL, "cma_worker");
859 +       if (!cma_worker) {
860 +               LOG_ERR("could not create CMA worker thread");
861 +               goto fail_worker;
862 +       }
863 +       set_user_nice(cma_worker, -20);
864 +       wake_up_process(cma_worker);
865 +
866 +       return;
867 +
868 +fail_worker:
869 +       vchiq_close_service(cma_service);
870 +fail_vchiq_open:
871 +       vchiq_shutdown(cma_instance);
872 +fail_vchiq_init:
873 +       vchiu_queue_delete(&cma_msg_queue);
874 +fail_queue:
875 +       return;
876 +}
877 +
878 +void
879 +loud_error_header(void)
880 +{
881 +       if (in_loud_error)
882 +               return;
883 +
884 +       LOG_ERR("============================================================"
885 +               "================");
886 +       LOG_ERR("============================================================"
887 +               "================");
888 +       LOG_ERR("=====");
889 +
890 +       in_loud_error = 1;
891 +}
892 +
893 +void
894 +loud_error_footer(void)
895 +{
896 +       if (!in_loud_error)
897 +               return;
898 +
899 +       LOG_ERR("=====");
900 +       LOG_ERR("============================================================"
901 +               "================");
902 +       LOG_ERR("============================================================"
903 +               "================");
904 +
905 +       in_loud_error = 0;
906 +}
907 +
908 +#if 1
909 +static int check_cma_config(void) { return 1; }
910 +#else
911 +static int
912 +read_vc_debug_var(VC_MEM_ACCESS_HANDLE_T handle,
913 +       const char *symbol,
914 +       void *buf, size_t bufsize)
915 +{
916 +       VC_MEM_ADDR_T vcMemAddr;
917 +       size_t vcMemSize;
918 +       uint8_t *mapAddr;
919 +       off_t  vcMapAddr;
920 +
921 +       if (!LookupVideoCoreSymbol(handle, symbol,
922 +               &vcMemAddr,
923 +               &vcMemSize)) {
924 +               loud_error_header();
925 +               loud_error(
926 +                       "failed to find VC symbol \"%s\".",
927 +                       symbol);
928 +               loud_error_footer();
929 +               return 0;
930 +       }
931 +
932 +       if (vcMemSize != bufsize) {
933 +               loud_error_header();
934 +               loud_error(
935 +                       "VC symbol \"%s\" is the wrong size.",
936 +                       symbol);
937 +               loud_error_footer();
938 +               return 0;
939 +       }
940 +
941 +       vcMapAddr = (off_t)vcMemAddr & VC_MEM_TO_ARM_ADDR_MASK;
942 +       vcMapAddr += mm_vc_mem_phys_addr;
943 +       mapAddr = ioremap_nocache(vcMapAddr, vcMemSize);
944 +       if (mapAddr == 0) {
945 +               loud_error_header();
946 +               loud_error(
947 +                       "failed to ioremap \"%s\" @ 0x%x "
948 +                       "(phys: 0x%x, size: %u).",
949 +                       symbol,
950 +                       (unsigned int)vcMapAddr,
951 +                       (unsigned int)vcMemAddr,
952 +                       (unsigned int)vcMemSize);
953 +               loud_error_footer();
954 +               return 0;
955 +       }
956 +
957 +       memcpy(buf, mapAddr, bufsize);
958 +       iounmap(mapAddr);
959 +
960 +       return 1;
961 +}
962 +
963 +
964 +static int
965 +check_cma_config(void)
966 +{
967 +       VC_MEM_ACCESS_HANDLE_T mem_hndl;
968 +       VC_MEM_ADDR_T mempool_start;
969 +       VC_MEM_ADDR_T mempool_end;
970 +       VC_MEM_ADDR_T mempool_offline_start;
971 +       VC_MEM_ADDR_T mempool_offline_end;
972 +       VC_MEM_ADDR_T cam_alloc_base;
973 +       VC_MEM_ADDR_T cam_alloc_size;
974 +       VC_MEM_ADDR_T cam_alloc_end;
975 +       int success = 0;
976 +
977 +       if (OpenVideoCoreMemory(&mem_hndl) != 0)
978 +               goto out;
979 +
980 +       /* Read the relevant VideoCore variables */
981 +       if (!read_vc_debug_var(mem_hndl, "__MEMPOOL_START",
982 +               &mempool_start,
983 +               sizeof(mempool_start)))
984 +               goto close;
985 +
986 +       if (!read_vc_debug_var(mem_hndl, "__MEMPOOL_END",
987 +               &mempool_end,
988 +               sizeof(mempool_end)))
989 +               goto close;
990 +
991 +       if (!read_vc_debug_var(mem_hndl, "__MEMPOOL_OFFLINE_START",
992 +               &mempool_offline_start,
993 +               sizeof(mempool_offline_start)))
994 +               goto close;
995 +
996 +       if (!read_vc_debug_var(mem_hndl, "__MEMPOOL_OFFLINE_END",
997 +               &mempool_offline_end,
998 +               sizeof(mempool_offline_end)))
999 +               goto close;
1000 +
1001 +       if (!read_vc_debug_var(mem_hndl, "cam_alloc_base",
1002 +               &cam_alloc_base,
1003 +               sizeof(cam_alloc_base)))
1004 +               goto close;
1005 +
1006 +       if (!read_vc_debug_var(mem_hndl, "cam_alloc_size",
1007 +               &cam_alloc_size,
1008 +               sizeof(cam_alloc_size)))
1009 +               goto close;
1010 +
1011 +       cam_alloc_end = cam_alloc_base + cam_alloc_size;
1012 +
1013 +       success = 1;
1014 +
1015 +       /* Now the sanity checks */
1016 +       if (!mempool_offline_start)
1017 +               mempool_offline_start = mempool_start;
1018 +       if (!mempool_offline_end)
1019 +               mempool_offline_end = mempool_end;
1020 +
1021 +       if (VCADDR_TO_PHYSADDR(mempool_offline_start) != vc_cma_base) {
1022 +               loud_error_header();
1023 +               loud_error(
1024 +                       "__MEMPOOL_OFFLINE_START(%x -> %lx) doesn't match "
1025 +                       "vc_cma_base(%x)",
1026 +                       mempool_offline_start,
1027 +                       VCADDR_TO_PHYSADDR(mempool_offline_start),
1028 +                       vc_cma_base);
1029 +               success = 0;
1030 +       }
1031 +
1032 +       if (VCADDR_TO_PHYSADDR(mempool_offline_end) !=
1033 +               (vc_cma_base + vc_cma_size)) {
1034 +               loud_error_header();
1035 +               loud_error(
1036 +                       "__MEMPOOL_OFFLINE_END(%x -> %lx) doesn't match "
1037 +                       "vc_cma_base(%x) + vc_cma_size(%x) = %x",
1038 +                       mempool_offline_start,
1039 +                       VCADDR_TO_PHYSADDR(mempool_offline_end),
1040 +                       vc_cma_base, vc_cma_size, vc_cma_base + vc_cma_size);
1041 +               success = 0;
1042 +       }
1043 +
1044 +       if (mempool_end < mempool_start) {
1045 +               loud_error_header();
1046 +               loud_error(
1047 +                       "__MEMPOOL_END(%x) must not be before "
1048 +                       "__MEMPOOL_START(%x)",
1049 +                       mempool_end,
1050 +                       mempool_start);
1051 +               success = 0;
1052 +       }
1053 +
1054 +       if (mempool_offline_end < mempool_offline_start) {
1055 +               loud_error_header();
1056 +               loud_error(
1057 +                       "__MEMPOOL_OFFLINE_END(%x) must not be before "
1058 +                       "__MEMPOOL_OFFLINE_START(%x)",
1059 +                       mempool_offline_end,
1060 +                       mempool_offline_start);
1061 +               success = 0;
1062 +       }
1063 +
1064 +       if (mempool_offline_start < mempool_start) {
1065 +               loud_error_header();
1066 +               loud_error(
1067 +                       "__MEMPOOL_OFFLINE_START(%x) must not be before "
1068 +                       "__MEMPOOL_START(%x)",
1069 +                       mempool_offline_start,
1070 +                       mempool_start);
1071 +               success = 0;
1072 +       }
1073 +
1074 +       if (mempool_offline_end > mempool_end) {
1075 +               loud_error_header();
1076 +               loud_error(
1077 +                       "__MEMPOOL_OFFLINE_END(%x) must not be after "
1078 +                       "__MEMPOOL_END(%x)",
1079 +                       mempool_offline_end,
1080 +                       mempool_end);
1081 +               success = 0;
1082 +       }
1083 +
1084 +       if ((cam_alloc_base < mempool_end) &&
1085 +               (cam_alloc_end > mempool_start)) {
1086 +               loud_error_header();
1087 +               loud_error(
1088 +                       "cam_alloc pool(%x-%x) overlaps "
1089 +                       "mempool(%x-%x)",
1090 +                       cam_alloc_base, cam_alloc_end,
1091 +                       mempool_start, mempool_end);
1092 +               success = 0;
1093 +       }
1094 +
1095 +       loud_error_footer();
1096 +
1097 +close:
1098 +       CloseVideoCoreMemory(mem_hndl);
1099 +
1100 +out:
1101 +       return success;
1102 +}
1103 +#endif
1104 +
1105 +static int vc_cma_init(void)
1106 +{
1107 +       int rc = -EFAULT;
1108 +       struct device *dev;
1109 +
1110 +       if (!check_cma_config())
1111 +               goto out_release;
1112 +
1113 +       printk(KERN_INFO "vc-cma: Videocore CMA driver\n");
1114 +       printk(KERN_INFO "vc-cma: vc_cma_base      = 0x%08x\n", vc_cma_base);
1115 +       printk(KERN_INFO "vc-cma: vc_cma_size      = 0x%08x (%u MiB)\n",
1116 +              vc_cma_size, vc_cma_size / (1024 * 1024));
1117 +       printk(KERN_INFO "vc-cma: vc_cma_initial   = 0x%08x (%u MiB)\n",
1118 +              vc_cma_initial, vc_cma_initial / (1024 * 1024));
1119 +
1120 +       vc_cma_base_page = phys_to_page(vc_cma_base);
1121 +
1122 +       if (vc_cma_chunks) {
1123 +               int chunks_needed = vc_cma_initial / VC_CMA_CHUNK_SIZE;
1124 +
1125 +               for (vc_cma_chunks_used = 0;
1126 +                    vc_cma_chunks_used < chunks_needed; vc_cma_chunks_used++) {
1127 +                       struct page *chunk;
1128 +                       chunk = dma_alloc_from_contiguous(NULL /*&vc_cma_device.dev*/,
1129 +                                                         PAGES_PER_CHUNK,
1130 +                                                         VC_CMA_CHUNK_ORDER);
1131 +                       if (!chunk)
1132 +                               break;
1133 +                       BUG_ON(((page_to_phys(chunk) - vc_cma_base) %
1134 +                               VC_CMA_CHUNK_SIZE) != 0);
1135 +               }
1136 +               if (vc_cma_chunks_used != chunks_needed) {
1137 +                       LOG_ERR("%s: dma_alloc_from_contiguous failed (%d "
1138 +                               "bytes, allocation %d of %d)",
1139 +                               __func__, VC_CMA_CHUNK_SIZE,
1140 +                               vc_cma_chunks_used, chunks_needed);
1141 +                       goto out_release;
1142 +               }
1143 +
1144 +               vchiq_add_connected_callback(vc_cma_connected_init);
1145 +       }
1146 +
1147 +       rc = alloc_chrdev_region(&vc_cma_devnum, 0, 1, DRIVER_NAME);
1148 +       if (rc < 0) {
1149 +               LOG_ERR("%s: alloc_chrdev_region failed (rc=%d)", __func__, rc);
1150 +               goto out_release;
1151 +       }
1152 +
1153 +       cdev_init(&vc_cma_cdev, &vc_cma_fops);
1154 +       rc = cdev_add(&vc_cma_cdev, vc_cma_devnum, 1);
1155 +       if (rc != 0) {
1156 +               LOG_ERR("%s: cdev_add failed (rc=%d)", __func__, rc);
1157 +               goto out_unregister;
1158 +       }
1159 +
1160 +       vc_cma_class = class_create(THIS_MODULE, DRIVER_NAME);
1161 +       if (IS_ERR(vc_cma_class)) {
1162 +               rc = PTR_ERR(vc_cma_class);
1163 +               LOG_ERR("%s: class_create failed (rc=%d)", __func__, rc);
1164 +               goto out_cdev_del;
1165 +       }
1166 +
1167 +       dev = device_create(vc_cma_class, NULL, vc_cma_devnum, NULL,
1168 +                           DRIVER_NAME);
1169 +       if (IS_ERR(dev)) {
1170 +               rc = PTR_ERR(dev);
1171 +               LOG_ERR("%s: device_create failed (rc=%d)", __func__, rc);
1172 +               goto out_class_destroy;
1173 +       }
1174 +
1175 +       vc_cma_proc_entry = proc_create(DRIVER_NAME, 0444, NULL, &vc_cma_proc_fops);
1176 +       if (vc_cma_proc_entry == NULL) {
1177 +               rc = -EFAULT;
1178 +               LOG_ERR("%s: proc_create failed", __func__);
1179 +               goto out_device_destroy;
1180 +       }
1181 +    
1182 +       vc_cma_inited = 1;
1183 +       return 0;
1184 +
1185 +out_device_destroy:
1186 +       device_destroy(vc_cma_class, vc_cma_devnum);
1187 +
1188 +out_class_destroy:
1189 +       class_destroy(vc_cma_class);
1190 +       vc_cma_class = NULL;
1191 +
1192 +out_cdev_del:
1193 +       cdev_del(&vc_cma_cdev);
1194 +
1195 +out_unregister:
1196 +       unregister_chrdev_region(vc_cma_devnum, 1);
1197 +
1198 +out_release:
1199 +       /* It is tempting to try to clean up by calling
1200 +          dma_release_from_contiguous for all allocated chunks, but it isn't
1201 +          a very safe thing to do. If vc_cma_initial is non-zero it is because
1202 +          VideoCore is already using that memory, so giving it back to Linux
1203 +          is likely to be fatal.
1204 +        */
1205 +       return -1;
1206 +}
1207 +
1208 +/****************************************************************************
1209 +*
1210 +*   vc_cma_exit
1211 +*
1212 +***************************************************************************/
1213 +
1214 +static void __exit vc_cma_exit(void)
1215 +{
1216 +       LOG_DBG("%s: called", __func__);
1217 +
1218 +       if (vc_cma_inited) {
1219 +               remove_proc_entry(DRIVER_NAME, NULL);
1220 +               device_destroy(vc_cma_class, vc_cma_devnum);
1221 +               class_destroy(vc_cma_class);
1222 +               cdev_del(&vc_cma_cdev);
1223 +               unregister_chrdev_region(vc_cma_devnum, 1);
1224 +       }
1225 +}
1226 +
1227 +module_init(vc_cma_init);
1228 +module_exit(vc_cma_exit);
1229 +MODULE_LICENSE("GPL");
1230 +MODULE_AUTHOR("Broadcom Corporation");
1231 --- a/drivers/misc/Makefile
1232 +++ b/drivers/misc/Makefile
1233 @@ -53,4 +53,4 @@ obj-$(CONFIG_INTEL_MEI)               += mei/
1234  obj-$(CONFIG_VMWARE_VMCI)      += vmw_vmci/
1235  obj-$(CONFIG_LATTICE_ECP3_CONFIG)      += lattice-ecp3-config.o
1236  obj-$(CONFIG_SRAM)             += sram.o
1237 -obj-y                          += vc04_services/
1238 +obj-$(CONFIG_BCM2708_VCHIQ)    += vc04_services/
1239 --- /dev/null
1240 +++ b/include/linux/broadcom/vc_cma.h
1241 @@ -0,0 +1,30 @@
1242 +/*****************************************************************************
1243 +* Copyright 2012 Broadcom Corporation.  All rights reserved.
1244 +*
1245 +* Unless you and Broadcom execute a separate written software license
1246 +* agreement governing use of this software, this software is licensed to you
1247 +* under the terms of the GNU General Public License version 2, available at
1248 +* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
1249 +*
1250 +* Notwithstanding the above, under no circumstances may you combine this
1251 +* software in any way with any other Broadcom software provided under a
1252 +* license other than the GPL, without Broadcom's express prior written
1253 +* consent.
1254 +*****************************************************************************/
1255 +
1256 +#if !defined( VC_CMA_H )
1257 +#define VC_CMA_H
1258 +
1259 +#include <linux/ioctl.h>
1260 +
1261 +#define VC_CMA_IOC_MAGIC 0xc5
1262 +
1263 +#define VC_CMA_IOC_RESERVE _IO(VC_CMA_IOC_MAGIC, 0)
1264 +
1265 +#ifdef __KERNEL__
1266 +extern void __init vc_cma_early_init(void);
1267 +extern void __init vc_cma_reserve(void);
1268 +#endif
1269 +
1270 +#endif /* VC_CMA_H */
1271 +