1 diff -urN linux-3.10/drivers/char/broadcom/Kconfig linux-rpi-3.10.y/drivers/char/broadcom/Kconfig
2 --- linux-3.10/drivers/char/broadcom/Kconfig 1970-01-01 01:00:00.000000000 +0100
3 +++ linux-rpi-3.10.y/drivers/char/broadcom/Kconfig 2013-07-06 15:25:50.000000000 +0100
6 +# Broadcom char driver config
9 +menuconfig BRCM_CHAR_DRIVERS
10 + tristate "Broadcom Char Drivers"
13 + Broadcom's char drivers
16 + bool "Videocore CMA"
20 + Helper for videocore CMA access.
22 diff -urN linux-3.10/drivers/char/broadcom/Makefile linux-rpi-3.10.y/drivers/char/broadcom/Makefile
23 --- linux-3.10/drivers/char/broadcom/Makefile 1970-01-01 01:00:00.000000000 +0100
24 +++ linux-rpi-3.10.y/drivers/char/broadcom/Makefile 2013-07-06 15:25:50.000000000 +0100
26 +obj-$(CONFIG_BCM_VC_CMA) += vc_cma/
28 diff -urN linux-3.10/drivers/char/broadcom/vc_cma/Makefile linux-rpi-3.10.y/drivers/char/broadcom/vc_cma/Makefile
29 --- linux-3.10/drivers/char/broadcom/vc_cma/Makefile 1970-01-01 01:00:00.000000000 +0100
30 +++ linux-rpi-3.10.y/drivers/char/broadcom/vc_cma/Makefile 2013-07-06 15:25:50.000000000 +0100
32 +EXTRA_CFLAGS += -Wall -Wstrict-prototypes -Wno-trigraphs
33 +EXTRA_CFLAGS += -Werror
34 +EXTRA_CFLAGS += -I"include/linux/broadcom"
35 +EXTRA_CFLAGS += -I"drivers/misc/vc04_services"
36 +EXTRA_CFLAGS += -I"drivers/misc/vc04_services/interface/vchi"
37 +EXTRA_CFLAGS += -I"drivers/misc/vc04_services/interface/vchiq_arm"
39 +EXTRA_CFLAGS += -D__KERNEL__
40 +EXTRA_CFLAGS += -D__linux__
41 +EXTRA_CFLAGS += -Werror
43 +obj-$(CONFIG_BCM_VC_CMA) += vc-cma.o
45 +vc-cma-objs := vc_cma.o
47 diff -urN linux-3.10/drivers/char/broadcom/vc_cma/vc_cma.c linux-rpi-3.10.y/drivers/char/broadcom/vc_cma/vc_cma.c
48 --- linux-3.10/drivers/char/broadcom/vc_cma/vc_cma.c 1970-01-01 01:00:00.000000000 +0100
49 +++ linux-rpi-3.10.y/drivers/char/broadcom/vc_cma/vc_cma.c 2013-07-06 15:25:50.000000000 +0100
52 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
54 + * Redistribution and use in source and binary forms, with or without
55 + * modification, are permitted provided that the following conditions
57 + * 1. Redistributions of source code must retain the above copyright
58 + * notice, this list of conditions, and the following disclaimer,
59 + * without modification.
60 + * 2. Redistributions in binary form must reproduce the above copyright
61 + * notice, this list of conditions and the following disclaimer in the
62 + * documentation and/or other materials provided with the distribution.
63 + * 3. The names of the above-listed copyright holders may not be used
64 + * to endorse or promote products derived from this software without
65 + * specific prior written permission.
67 + * ALTERNATIVELY, this software may be distributed under the terms of the
68 + * GNU General Public License ("GPL") version 2, as published by the Free
69 + * Software Foundation.
71 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
72 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
73 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
74 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
75 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
76 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
77 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
78 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
79 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
80 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
81 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
84 +#include <linux/kernel.h>
85 +#include <linux/module.h>
86 +#include <linux/kthread.h>
87 +#include <linux/fs.h>
88 +#include <linux/device.h>
89 +#include <linux/cdev.h>
90 +#include <linux/mm.h>
91 +#include <linux/proc_fs.h>
92 +#include <linux/seq_file.h>
93 +#include <linux/dma-mapping.h>
94 +#include <linux/dma-contiguous.h>
95 +#include <linux/platform_device.h>
96 +#include <linux/uaccess.h>
97 +#include <asm/cacheflush.h>
101 +#include "vchiq_util.h"
102 +#include "vchiq_connected.h"
103 +//#include "debug_sym.h"
104 +//#include "vc_mem.h"
106 +#define DRIVER_NAME "vc-cma"
108 +#define LOG_DBG(fmt, ...) \
109 + if (vc_cma_debug) \
110 + printk(KERN_INFO fmt "\n", ##__VA_ARGS__)
111 +#define LOG_ERR(fmt, ...) \
112 + printk(KERN_ERR fmt "\n", ##__VA_ARGS__)
114 +#define VC_CMA_FOURCC VCHIQ_MAKE_FOURCC('C', 'M', 'A', ' ')
115 +#define VC_CMA_VERSION 2
117 +#define VC_CMA_CHUNK_ORDER 6 /* 256K */
118 +#define VC_CMA_CHUNK_SIZE (4096 << VC_CMA_CHUNK_ORDER)
119 +#define VC_CMA_MAX_PARAMS_PER_MSG \
120 + ((VCHIQ_MAX_MSG_SIZE - sizeof(unsigned short))/sizeof(unsigned short))
121 +#define VC_CMA_RESERVE_COUNT_MAX 16
123 +#define PAGES_PER_CHUNK (VC_CMA_CHUNK_SIZE / PAGE_SIZE)
125 +#define VCADDR_TO_PHYSADDR(vcaddr) (mm_vc_mem_phys_addr + vcaddr)
127 +#define loud_error(...) \
128 + LOG_ERR("===== " __VA_ARGS__)
134 + VC_CMA_MSG_ALLOC, /* chunk count */
135 + VC_CMA_MSG_FREE, /* chunk, chunk, ... */
136 + VC_CMA_MSG_ALLOCATED, /* chunk, chunk, ... */
137 + VC_CMA_MSG_REQUEST_ALLOC, /* chunk count */
138 + VC_CMA_MSG_REQUEST_FREE, /* chunk count */
139 + VC_CMA_MSG_RESERVE, /* bytes lo, bytes hi */
140 + VC_CMA_MSG_UPDATE_RESERVE,
145 + unsigned short type;
146 + unsigned short params[VC_CMA_MAX_PARAMS_PER_MSG];
149 +struct vc_cma_reserve_user {
151 + unsigned int reserve;
154 +/* Device (/dev) related variables */
155 +static dev_t vc_cma_devnum;
156 +static struct class *vc_cma_class;
157 +static struct cdev vc_cma_cdev;
158 +static int vc_cma_inited;
159 +static int vc_cma_debug;
162 +static struct proc_dir_entry *vc_cma_proc_entry;
164 +phys_addr_t vc_cma_base;
165 +struct page *vc_cma_base_page;
166 +unsigned int vc_cma_size;
167 +EXPORT_SYMBOL(vc_cma_size);
168 +unsigned int vc_cma_initial;
169 +unsigned int vc_cma_chunks;
170 +unsigned int vc_cma_chunks_used;
171 +unsigned int vc_cma_chunks_reserved;
173 +static int in_loud_error;
175 +unsigned int vc_cma_reserve_total;
176 +unsigned int vc_cma_reserve_count;
177 +struct vc_cma_reserve_user vc_cma_reserve_users[VC_CMA_RESERVE_COUNT_MAX];
178 +static DEFINE_SEMAPHORE(vc_cma_reserve_mutex);
179 +static DEFINE_SEMAPHORE(vc_cma_worker_queue_push_mutex);
181 +static u64 vc_cma_dma_mask = DMA_BIT_MASK(32);
182 +static struct platform_device vc_cma_device = {
186 + .dma_mask = &vc_cma_dma_mask,
187 + .coherent_dma_mask = DMA_BIT_MASK(32),
191 +static VCHIQ_INSTANCE_T cma_instance;
192 +static VCHIQ_SERVICE_HANDLE_T cma_service;
193 +static VCHIU_QUEUE_T cma_msg_queue;
194 +static struct task_struct *cma_worker;
196 +static int vc_cma_set_reserve(unsigned int reserve, unsigned int pid);
197 +static int vc_cma_alloc_chunks(int num_chunks, struct cma_msg *reply);
198 +static VCHIQ_STATUS_T cma_service_callback(VCHIQ_REASON_T reason,
199 + VCHIQ_HEADER_T * header,
200 + VCHIQ_SERVICE_HANDLE_T service,
201 + void *bulk_userdata);
202 +static void send_vc_msg(unsigned short type,
203 + unsigned short param1, unsigned short param2);
204 +static bool send_worker_msg(VCHIQ_HEADER_T * msg);
206 +static int early_vc_cma_mem(char *p)
208 + unsigned int new_size;
209 + printk(KERN_NOTICE "early_vc_cma_mem(%s)", p);
210 + vc_cma_size = memparse(p, &p);
211 + vc_cma_initial = vc_cma_size;
213 + vc_cma_size = memparse(p + 1, &p);
215 + vc_cma_base = memparse(p + 1, &p);
217 + new_size = (vc_cma_size - ((-vc_cma_base) & (VC_CMA_CHUNK_SIZE - 1)))
218 + & ~(VC_CMA_CHUNK_SIZE - 1);
219 + if (new_size > vc_cma_size)
221 + vc_cma_initial = (vc_cma_initial + VC_CMA_CHUNK_SIZE - 1)
222 + & ~(VC_CMA_CHUNK_SIZE - 1);
223 + if (vc_cma_initial > vc_cma_size)
224 + vc_cma_initial = vc_cma_size;
225 + vc_cma_base = (vc_cma_base + VC_CMA_CHUNK_SIZE - 1)
226 + & ~(VC_CMA_CHUNK_SIZE - 1);
228 + printk(KERN_NOTICE " -> initial %x, size %x, base %x", vc_cma_initial,
229 + vc_cma_size, (unsigned int)vc_cma_base);
234 +early_param("vc-cma-mem", early_vc_cma_mem);
236 +void vc_cma_early_init(void)
238 + LOG_DBG("vc_cma_early_init - vc_cma_chunks = %d", vc_cma_chunks);
240 + int rc = platform_device_register(&vc_cma_device);
241 + LOG_DBG("platform_device_register -> %d", rc);
245 +void vc_cma_reserve(void)
247 + /* if vc_cma_size is set, then declare vc CMA area of the same
248 + * size from the end of memory
251 + if (dma_declare_contiguous(NULL /*&vc_cma_device.dev*/, vc_cma_size,
252 + vc_cma_base, 0) == 0) {
254 + LOG_ERR("vc_cma: dma_declare_contiguous(%x,%x) failed",
255 + vc_cma_size, (unsigned int)vc_cma_base);
259 + vc_cma_chunks = vc_cma_size / VC_CMA_CHUNK_SIZE;
262 +/****************************************************************************
266 +***************************************************************************/
268 +static int vc_cma_open(struct inode *inode, struct file *file)
276 +/****************************************************************************
280 +***************************************************************************/
282 +static int vc_cma_release(struct inode *inode, struct file *file)
287 + vc_cma_set_reserve(0, current->tgid);
292 +/****************************************************************************
296 +***************************************************************************/
298 +static long vc_cma_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
306 + case VC_CMA_IOC_RESERVE:
307 + rc = vc_cma_set_reserve((unsigned int)arg, current->tgid);
312 + LOG_ERR("vc-cma: Unknown ioctl %x", cmd);
319 +/****************************************************************************
321 +* File Operations for the driver.
323 +***************************************************************************/
325 +static const struct file_operations vc_cma_fops = {
326 + .owner = THIS_MODULE,
327 + .open = vc_cma_open,
328 + .release = vc_cma_release,
329 + .unlocked_ioctl = vc_cma_ioctl,
332 +/****************************************************************************
336 +***************************************************************************/
338 +static int vc_cma_show_info(struct seq_file *m, void *v)
342 + seq_printf(m, "Videocore CMA:\n");
343 + seq_printf(m, " Base : %08x\n", (unsigned int)vc_cma_base);
344 + seq_printf(m, " Length : %08x\n", vc_cma_size);
345 + seq_printf(m, " Initial : %08x\n", vc_cma_initial);
346 + seq_printf(m, " Chunk size : %08x\n", VC_CMA_CHUNK_SIZE);
347 + seq_printf(m, " Chunks : %4d (%d bytes)\n",
348 + (int)vc_cma_chunks,
349 + (int)(vc_cma_chunks * VC_CMA_CHUNK_SIZE));
350 + seq_printf(m, " Used : %4d (%d bytes)\n",
351 + (int)vc_cma_chunks_used,
352 + (int)(vc_cma_chunks_used * VC_CMA_CHUNK_SIZE));
353 + seq_printf(m, " Reserved : %4d (%d bytes)\n",
354 + (unsigned int)vc_cma_chunks_reserved,
355 + (int)(vc_cma_chunks_reserved * VC_CMA_CHUNK_SIZE));
357 + for (i = 0; i < vc_cma_reserve_count; i++) {
358 + struct vc_cma_reserve_user *user = &vc_cma_reserve_users[i];
359 + seq_printf(m, " PID %5d: %d bytes\n", user->pid,
363 + seq_printf(m, "\n");
368 +static int vc_cma_proc_open(struct inode *inode, struct file *file)
370 + return single_open(file, vc_cma_show_info, NULL);
373 +/****************************************************************************
377 +***************************************************************************/
379 +static int vc_cma_proc_write(struct file *file,
380 + const char __user *buffer,
381 + size_t size, loff_t *ppos)
384 + char input_str[20];
386 + memset(input_str, 0, sizeof(input_str));
388 + if (size > sizeof(input_str)) {
389 + LOG_ERR("%s: input string length too long", __func__);
393 + if (copy_from_user(input_str, buffer, size - 1)) {
394 + LOG_ERR("%s: failed to get input string", __func__);
397 +#define ALLOC_STR "alloc"
398 +#define FREE_STR "free"
399 +#define DEBUG_STR "debug"
400 +#define RESERVE_STR "reserve"
401 + if (strncmp(input_str, ALLOC_STR, strlen(ALLOC_STR)) == 0) {
403 + char *p = input_str + strlen(ALLOC_STR);
407 + size = memparse(p, NULL);
408 + LOG_ERR("/proc/vc-cma: alloc %d", size);
410 + send_vc_msg(VC_CMA_MSG_REQUEST_FREE,
411 + size / VC_CMA_CHUNK_SIZE, 0);
413 + LOG_ERR("invalid size '%s'", p);
415 + } else if (strncmp(input_str, FREE_STR, strlen(FREE_STR)) == 0) {
417 + char *p = input_str + strlen(FREE_STR);
421 + size = memparse(p, NULL);
422 + LOG_ERR("/proc/vc-cma: free %d", size);
424 + send_vc_msg(VC_CMA_MSG_REQUEST_ALLOC,
425 + size / VC_CMA_CHUNK_SIZE, 0);
427 + LOG_ERR("invalid size '%s'", p);
429 + } else if (strncmp(input_str, DEBUG_STR, strlen(DEBUG_STR)) == 0) {
430 + char *p = input_str + strlen(DEBUG_STR);
433 + if ((strcmp(p, "on") == 0) || (strcmp(p, "1") == 0))
435 + else if ((strcmp(p, "off") == 0) || (strcmp(p, "0") == 0))
437 + LOG_ERR("/proc/vc-cma: debug %s", vc_cma_debug ? "on" : "off");
439 + } else if (strncmp(input_str, RESERVE_STR, strlen(RESERVE_STR)) == 0) {
442 + char *p = input_str + strlen(RESERVE_STR);
445 + size = memparse(p, NULL);
447 + reserved = vc_cma_set_reserve(size, current->tgid);
448 + rc = (reserved >= 0) ? size : reserved;
455 +/****************************************************************************
457 +* File Operations for /proc interface.
459 +***************************************************************************/
461 +static const struct file_operations vc_cma_proc_fops = {
462 + .open = vc_cma_proc_open,
464 + .write = vc_cma_proc_write,
465 + .llseek = seq_lseek,
466 + .release = single_release
469 +static int vc_cma_set_reserve(unsigned int reserve, unsigned int pid)
471 + struct vc_cma_reserve_user *user = NULL;
475 + if (down_interruptible(&vc_cma_reserve_mutex))
476 + return -ERESTARTSYS;
478 + for (i = 0; i < vc_cma_reserve_count; i++) {
479 + if (pid == vc_cma_reserve_users[i].pid) {
480 + user = &vc_cma_reserve_users[i];
481 + delta = reserve - user->reserve;
483 + user->reserve = reserve;
485 + /* Remove this entry by copying downwards */
486 + while ((i + 1) < vc_cma_reserve_count) {
487 + user[0].pid = user[1].pid;
488 + user[0].reserve = user[1].reserve;
492 + vc_cma_reserve_count--;
499 + if (reserve && !user) {
500 + if (vc_cma_reserve_count == VC_CMA_RESERVE_COUNT_MAX) {
501 + LOG_ERR("vc-cma: Too many reservations - "
502 + "increase CMA_RESERVE_COUNT_MAX");
503 + up(&vc_cma_reserve_mutex);
506 + user = &vc_cma_reserve_users[vc_cma_reserve_count];
508 + user->reserve = reserve;
510 + vc_cma_reserve_count++;
513 + vc_cma_reserve_total += delta;
515 + send_vc_msg(VC_CMA_MSG_RESERVE,
516 + vc_cma_reserve_total & 0xffff, vc_cma_reserve_total >> 16);
518 + send_worker_msg((VCHIQ_HEADER_T *) VC_CMA_MSG_UPDATE_RESERVE);
520 + LOG_DBG("/proc/vc-cma: reserve %d (PID %d) - total %u",
521 + reserve, pid, vc_cma_reserve_total);
523 + up(&vc_cma_reserve_mutex);
525 + return vc_cma_reserve_total;
528 +static VCHIQ_STATUS_T cma_service_callback(VCHIQ_REASON_T reason,
529 + VCHIQ_HEADER_T * header,
530 + VCHIQ_SERVICE_HANDLE_T service,
531 + void *bulk_userdata)
534 + case VCHIQ_MESSAGE_AVAILABLE:
535 + if (!send_worker_msg(header))
536 + return VCHIQ_RETRY;
538 + case VCHIQ_SERVICE_CLOSED:
539 + LOG_DBG("CMA service closed");
542 + LOG_ERR("Unexpected CMA callback reason %d", reason);
545 + return VCHIQ_SUCCESS;
548 +static void send_vc_msg(unsigned short type,
549 + unsigned short param1, unsigned short param2)
551 + unsigned short msg[] = { type, param1, param2 };
552 + VCHIQ_ELEMENT_T elem = { &msg, sizeof(msg) };
553 + VCHIQ_STATUS_T ret;
554 + vchiq_use_service(cma_service);
555 + ret = vchiq_queue_message(cma_service, &elem, 1);
556 + vchiq_release_service(cma_service);
557 + if (ret != VCHIQ_SUCCESS)
558 + LOG_ERR("vchiq_queue_message returned %x", ret);
561 +static bool send_worker_msg(VCHIQ_HEADER_T * msg)
563 + if (down_interruptible(&vc_cma_worker_queue_push_mutex))
565 + vchiu_queue_push(&cma_msg_queue, msg);
566 + up(&vc_cma_worker_queue_push_mutex);
570 +static int vc_cma_alloc_chunks(int num_chunks, struct cma_msg *reply)
573 + for (i = 0; i < num_chunks; i++) {
574 + struct page *chunk;
575 + unsigned int chunk_num;
576 + uint8_t *chunk_addr;
577 + size_t chunk_size = PAGES_PER_CHUNK << PAGE_SHIFT;
579 + chunk = dma_alloc_from_contiguous(NULL /*&vc_cma_device.dev*/,
581 + VC_CMA_CHUNK_ORDER);
585 + chunk_addr = page_address(chunk);
586 + dmac_flush_range(chunk_addr, chunk_addr + chunk_size);
587 + outer_inv_range(__pa(chunk_addr), __pa(chunk_addr) +
591 + (page_to_phys(chunk) - vc_cma_base) / VC_CMA_CHUNK_SIZE;
592 + BUG_ON(((page_to_phys(chunk) - vc_cma_base) %
593 + VC_CMA_CHUNK_SIZE) != 0);
594 + if (chunk_num >= vc_cma_chunks) {
595 + LOG_ERR("%s: ===============================",
597 + LOG_ERR("%s: chunk phys %x, vc_cma %x-%x - "
598 + "bad SPARSEMEM configuration?",
599 + __func__, (unsigned int)page_to_phys(chunk),
600 + vc_cma_base, vc_cma_base + vc_cma_size - 1);
601 + LOG_ERR("%s: dev->cma_area = %p\n", __func__,
602 + vc_cma_device.dev.cma_area);
603 + LOG_ERR("%s: ===============================",
607 + reply->params[i] = chunk_num;
608 + vc_cma_chunks_used++;
611 + if (i < num_chunks) {
612 + LOG_ERR("%s: dma_alloc_from_contiguous failed "
613 + "for %x bytes (alloc %d of %d, %d free)",
614 + __func__, VC_CMA_CHUNK_SIZE, i,
615 + num_chunks, vc_cma_chunks - vc_cma_chunks_used);
619 + LOG_DBG("CMA allocated %d chunks -> %d used",
620 + num_chunks, vc_cma_chunks_used);
621 + reply->type = VC_CMA_MSG_ALLOCATED;
624 + VCHIQ_ELEMENT_T elem = {
626 + offsetof(struct cma_msg, params[0]) +
627 + num_chunks * sizeof(reply->params[0])
629 + VCHIQ_STATUS_T ret;
630 + vchiq_use_service(cma_service);
631 + ret = vchiq_queue_message(cma_service, &elem, 1);
632 + vchiq_release_service(cma_service);
633 + if (ret != VCHIQ_SUCCESS)
634 + LOG_ERR("vchiq_queue_message return " "%x", ret);
640 +static int cma_worker_proc(void *param)
642 + static struct cma_msg reply;
646 + VCHIQ_HEADER_T *msg;
647 + static struct cma_msg msg_copy;
648 + struct cma_msg *cma_msg = &msg_copy;
649 + int type, msg_size;
651 + msg = vchiu_queue_pop(&cma_msg_queue);
652 + if ((unsigned int)msg >= VC_CMA_MSG_MAX) {
653 + msg_size = msg->size;
654 + memcpy(&msg_copy, msg->data, msg_size);
655 + type = cma_msg->type;
656 + vchiq_release_message(cma_service, msg);
660 + if (type == VC_CMA_MSG_QUIT)
662 + else if (type == VC_CMA_MSG_UPDATE_RESERVE) {
672 + case VC_CMA_MSG_ALLOC:{
673 + int num_chunks, free_chunks;
674 + num_chunks = cma_msg->params[0];
676 + vc_cma_chunks - vc_cma_chunks_used;
677 + LOG_DBG("CMA_MSG_ALLOC(%d chunks)", num_chunks);
678 + if (num_chunks > VC_CMA_MAX_PARAMS_PER_MSG) {
680 + ("CMA_MSG_ALLOC - chunk count (%d) "
681 + "exceeds VC_CMA_MAX_PARAMS_PER_MSG (%d)",
683 + VC_CMA_MAX_PARAMS_PER_MSG);
684 + num_chunks = VC_CMA_MAX_PARAMS_PER_MSG;
687 + if (num_chunks > free_chunks) {
689 + ("CMA_MSG_ALLOC - chunk count (%d) "
690 + "exceeds free chunks (%d)",
691 + num_chunks, free_chunks);
692 + num_chunks = free_chunks;
695 + vc_cma_alloc_chunks(num_chunks, &reply);
699 + case VC_CMA_MSG_FREE:{
702 + offsetof(struct cma_msg,
704 + sizeof(cma_msg->params[0]);
706 + BUG_ON(chunk_count <= 0);
708 + LOG_DBG("CMA_MSG_FREE(%d chunks - %x, ...)",
709 + chunk_count, cma_msg->params[0]);
710 + for (i = 0; i < chunk_count; i++) {
711 + int chunk_num = cma_msg->params[i];
712 + struct page *page = vc_cma_base_page +
713 + chunk_num * PAGES_PER_CHUNK;
714 + if (chunk_num >= vc_cma_chunks) {
716 + ("CMA_MSG_FREE - chunk %d of %d"
717 + " (value %x) exceeds maximum "
718 + "(%x)", i, chunk_count,
720 + vc_cma_chunks - 1);
724 + if (!dma_release_from_contiguous
725 + (NULL /*&vc_cma_device.dev*/, page,
726 + PAGES_PER_CHUNK)) {
728 + ("CMA_MSG_FREE - failed to "
729 + "release chunk %d (phys %x, "
730 + "page %x)", chunk_num,
731 + page_to_phys(page),
732 + (unsigned int)page);
734 + vc_cma_chunks_used--;
736 + LOG_DBG("CMA released %d chunks -> %d used",
737 + i, vc_cma_chunks_used);
741 + case VC_CMA_MSG_UPDATE_RESERVE:{
742 + int chunks_needed =
743 + ((vc_cma_reserve_total + VC_CMA_CHUNK_SIZE -
745 + / VC_CMA_CHUNK_SIZE) -
746 + vc_cma_chunks_reserved;
749 + ("CMA_MSG_UPDATE_RESERVE(%d chunks needed)",
752 + /* Cap the reservations to what is available */
753 + if (chunks_needed > 0) {
754 + if (chunks_needed >
756 + vc_cma_chunks_used))
759 + vc_cma_chunks_used);
762 + vc_cma_alloc_chunks(chunks_needed,
767 + ("CMA_MSG_UPDATE_RESERVE(%d chunks allocated)",
769 + vc_cma_chunks_reserved += chunks_needed;
774 + LOG_ERR("unexpected msg type %d", type);
779 + LOG_DBG("quitting...");
783 +/****************************************************************************
785 +* vc_cma_connected_init
787 +* This function is called once the videocore has been connected.
789 +***************************************************************************/
791 +static void vc_cma_connected_init(void)
793 + VCHIQ_SERVICE_PARAMS_T service_params;
795 + LOG_DBG("vc_cma_connected_init");
797 + if (!vchiu_queue_init(&cma_msg_queue, 16)) {
798 + LOG_ERR("could not create CMA msg queue");
802 + if (vchiq_initialise(&cma_instance) != VCHIQ_SUCCESS)
803 + goto fail_vchiq_init;
805 + vchiq_connect(cma_instance);
807 + service_params.fourcc = VC_CMA_FOURCC;
808 + service_params.callback = cma_service_callback;
809 + service_params.userdata = NULL;
810 + service_params.version = VC_CMA_VERSION;
811 + service_params.version_min = VC_CMA_VERSION;
813 + if (vchiq_open_service(cma_instance, &service_params,
814 + &cma_service) != VCHIQ_SUCCESS) {
815 + LOG_ERR("failed to open service - already in use?");
816 + goto fail_vchiq_open;
819 + vchiq_release_service(cma_service);
821 + cma_worker = kthread_create(cma_worker_proc, NULL, "cma_worker");
823 + LOG_ERR("could not create CMA worker thread");
826 + set_user_nice(cma_worker, -20);
827 + wake_up_process(cma_worker);
832 + vchiq_close_service(cma_service);
834 + vchiq_shutdown(cma_instance);
836 + vchiu_queue_delete(&cma_msg_queue);
842 +loud_error_header(void)
847 + LOG_ERR("============================================================"
848 + "================");
849 + LOG_ERR("============================================================"
850 + "================");
857 +loud_error_footer(void)
859 + if (!in_loud_error)
863 + LOG_ERR("============================================================"
864 + "================");
865 + LOG_ERR("============================================================"
866 + "================");
872 +static int check_cma_config(void) { return 1; }
875 +read_vc_debug_var(VC_MEM_ACCESS_HANDLE_T handle,
876 + const char *symbol,
877 + void *buf, size_t bufsize)
879 + VC_MEM_ADDR_T vcMemAddr;
884 + if (!LookupVideoCoreSymbol(handle, symbol,
887 + loud_error_header();
889 + "failed to find VC symbol \"%s\".",
891 + loud_error_footer();
895 + if (vcMemSize != bufsize) {
896 + loud_error_header();
898 + "VC symbol \"%s\" is the wrong size.",
900 + loud_error_footer();
904 + vcMapAddr = (off_t)vcMemAddr & VC_MEM_TO_ARM_ADDR_MASK;
905 + vcMapAddr += mm_vc_mem_phys_addr;
906 + mapAddr = ioremap_nocache(vcMapAddr, vcMemSize);
907 + if (mapAddr == 0) {
908 + loud_error_header();
910 + "failed to ioremap \"%s\" @ 0x%x "
911 + "(phys: 0x%x, size: %u).",
913 + (unsigned int)vcMapAddr,
914 + (unsigned int)vcMemAddr,
915 + (unsigned int)vcMemSize);
916 + loud_error_footer();
920 + memcpy(buf, mapAddr, bufsize);
928 +check_cma_config(void)
930 + VC_MEM_ACCESS_HANDLE_T mem_hndl;
931 + VC_MEM_ADDR_T mempool_start;
932 + VC_MEM_ADDR_T mempool_end;
933 + VC_MEM_ADDR_T mempool_offline_start;
934 + VC_MEM_ADDR_T mempool_offline_end;
935 + VC_MEM_ADDR_T cam_alloc_base;
936 + VC_MEM_ADDR_T cam_alloc_size;
937 + VC_MEM_ADDR_T cam_alloc_end;
940 + if (OpenVideoCoreMemory(&mem_hndl) != 0)
943 + /* Read the relevant VideoCore variables */
944 + if (!read_vc_debug_var(mem_hndl, "__MEMPOOL_START",
946 + sizeof(mempool_start)))
949 + if (!read_vc_debug_var(mem_hndl, "__MEMPOOL_END",
951 + sizeof(mempool_end)))
954 + if (!read_vc_debug_var(mem_hndl, "__MEMPOOL_OFFLINE_START",
955 + &mempool_offline_start,
956 + sizeof(mempool_offline_start)))
959 + if (!read_vc_debug_var(mem_hndl, "__MEMPOOL_OFFLINE_END",
960 + &mempool_offline_end,
961 + sizeof(mempool_offline_end)))
964 + if (!read_vc_debug_var(mem_hndl, "cam_alloc_base",
966 + sizeof(cam_alloc_base)))
969 + if (!read_vc_debug_var(mem_hndl, "cam_alloc_size",
971 + sizeof(cam_alloc_size)))
974 + cam_alloc_end = cam_alloc_base + cam_alloc_size;
978 + /* Now the sanity checks */
979 + if (!mempool_offline_start)
980 + mempool_offline_start = mempool_start;
981 + if (!mempool_offline_end)
982 + mempool_offline_end = mempool_end;
984 + if (VCADDR_TO_PHYSADDR(mempool_offline_start) != vc_cma_base) {
985 + loud_error_header();
987 + "__MEMPOOL_OFFLINE_START(%x -> %lx) doesn't match "
989 + mempool_offline_start,
990 + VCADDR_TO_PHYSADDR(mempool_offline_start),
995 + if (VCADDR_TO_PHYSADDR(mempool_offline_end) !=
996 + (vc_cma_base + vc_cma_size)) {
997 + loud_error_header();
999 + "__MEMPOOL_OFFLINE_END(%x -> %lx) doesn't match "
1000 + "vc_cma_base(%x) + vc_cma_size(%x) = %x",
1001 + mempool_offline_start,
1002 + VCADDR_TO_PHYSADDR(mempool_offline_end),
1003 + vc_cma_base, vc_cma_size, vc_cma_base + vc_cma_size);
1007 + if (mempool_end < mempool_start) {
1008 + loud_error_header();
1010 + "__MEMPOOL_END(%x) must not be before "
1011 + "__MEMPOOL_START(%x)",
1017 + if (mempool_offline_end < mempool_offline_start) {
1018 + loud_error_header();
1020 + "__MEMPOOL_OFFLINE_END(%x) must not be before "
1021 + "__MEMPOOL_OFFLINE_START(%x)",
1022 + mempool_offline_end,
1023 + mempool_offline_start);
1027 + if (mempool_offline_start < mempool_start) {
1028 + loud_error_header();
1030 + "__MEMPOOL_OFFLINE_START(%x) must not be before "
1031 + "__MEMPOOL_START(%x)",
1032 + mempool_offline_start,
1037 + if (mempool_offline_end > mempool_end) {
1038 + loud_error_header();
1040 + "__MEMPOOL_OFFLINE_END(%x) must not be after "
1041 + "__MEMPOOL_END(%x)",
1042 + mempool_offline_end,
1047 + if ((cam_alloc_base < mempool_end) &&
1048 + (cam_alloc_end > mempool_start)) {
1049 + loud_error_header();
1051 + "cam_alloc pool(%x-%x) overlaps "
1053 + cam_alloc_base, cam_alloc_end,
1054 + mempool_start, mempool_end);
1058 + loud_error_footer();
1061 + CloseVideoCoreMemory(mem_hndl);
1068 +static int vc_cma_init(void)
1071 + struct device *dev;
1073 + if (!check_cma_config())
1076 + printk(KERN_INFO "vc-cma: Videocore CMA driver\n");
1077 + printk(KERN_INFO "vc-cma: vc_cma_base = 0x%08x\n", vc_cma_base);
1078 + printk(KERN_INFO "vc-cma: vc_cma_size = 0x%08x (%u MiB)\n",
1079 + vc_cma_size, vc_cma_size / (1024 * 1024));
1080 + printk(KERN_INFO "vc-cma: vc_cma_initial = 0x%08x (%u MiB)\n",
1081 + vc_cma_initial, vc_cma_initial / (1024 * 1024));
1083 + vc_cma_base_page = phys_to_page(vc_cma_base);
1085 + if (vc_cma_chunks) {
1086 + int chunks_needed = vc_cma_initial / VC_CMA_CHUNK_SIZE;
1088 + for (vc_cma_chunks_used = 0;
1089 + vc_cma_chunks_used < chunks_needed; vc_cma_chunks_used++) {
1090 + struct page *chunk;
1091 + chunk = dma_alloc_from_contiguous(NULL /*&vc_cma_device.dev*/,
1093 + VC_CMA_CHUNK_ORDER);
1096 + BUG_ON(((page_to_phys(chunk) - vc_cma_base) %
1097 + VC_CMA_CHUNK_SIZE) != 0);
1099 + if (vc_cma_chunks_used != chunks_needed) {
1100 + LOG_ERR("%s: dma_alloc_from_contiguous failed (%d "
1101 + "bytes, allocation %d of %d)",
1102 + __func__, VC_CMA_CHUNK_SIZE,
1103 + vc_cma_chunks_used, chunks_needed);
1107 + vchiq_add_connected_callback(vc_cma_connected_init);
1110 + rc = alloc_chrdev_region(&vc_cma_devnum, 0, 1, DRIVER_NAME);
1112 + LOG_ERR("%s: alloc_chrdev_region failed (rc=%d)", __func__, rc);
1116 + cdev_init(&vc_cma_cdev, &vc_cma_fops);
1117 + rc = cdev_add(&vc_cma_cdev, vc_cma_devnum, 1);
1119 + LOG_ERR("%s: cdev_add failed (rc=%d)", __func__, rc);
1120 + goto out_unregister;
1123 + vc_cma_class = class_create(THIS_MODULE, DRIVER_NAME);
1124 + if (IS_ERR(vc_cma_class)) {
1125 + rc = PTR_ERR(vc_cma_class);
1126 + LOG_ERR("%s: class_create failed (rc=%d)", __func__, rc);
1127 + goto out_cdev_del;
1130 + dev = device_create(vc_cma_class, NULL, vc_cma_devnum, NULL,
1132 + if (IS_ERR(dev)) {
1133 + rc = PTR_ERR(dev);
1134 + LOG_ERR("%s: device_create failed (rc=%d)", __func__, rc);
1135 + goto out_class_destroy;
1138 + vc_cma_proc_entry = create_proc_entry(DRIVER_NAME, 0444, NULL);
1139 + if (vc_cma_proc_entry == NULL) {
1141 + LOG_ERR("%s: create_proc_entry failed", __func__);
1142 + goto out_device_destroy;
1145 + vc_cma_proc_entry->proc_fops = &vc_cma_proc_fops;
1147 + vc_cma_inited = 1;
1150 +out_device_destroy:
1151 + device_destroy(vc_cma_class, vc_cma_devnum);
1154 + class_destroy(vc_cma_class);
1155 + vc_cma_class = NULL;
1158 + cdev_del(&vc_cma_cdev);
1161 + unregister_chrdev_region(vc_cma_devnum, 1);
1164 + /* It is tempting to try to clean up by calling
1165 + dma_release_from_contiguous for all allocated chunks, but it isn't
1166 + a very safe thing to do. If vc_cma_initial is non-zero it is because
1167 + VideoCore is already using that memory, so giving it back to Linux
1168 + is likely to be fatal.
1173 +/****************************************************************************
1177 +***************************************************************************/
1179 +static void __exit vc_cma_exit(void)
1181 + LOG_DBG("%s: called", __func__);
1183 + if (vc_cma_inited) {
1184 + remove_proc_entry(vc_cma_proc_entry->name, NULL);
1185 + device_destroy(vc_cma_class, vc_cma_devnum);
1186 + class_destroy(vc_cma_class);
1187 + cdev_del(&vc_cma_cdev);
1188 + unregister_chrdev_region(vc_cma_devnum, 1);
1192 +module_init(vc_cma_init);
1193 +module_exit(vc_cma_exit);
1194 +MODULE_LICENSE("GPL");
1195 +MODULE_AUTHOR("Broadcom Corporation");
1196 --- linux-3.10/drivers/char/Makefile 2013-06-30 23:13:29.000000000 +0100
1197 +++ linux-rpi-3.10.y/drivers/char/Makefile 2013-07-06 15:25:50.000000000 +0100
1201 obj-$(CONFIG_TILE_SROM) += tile-srom.o
1203 +obj-$(CONFIG_BRCM_CHAR_DRIVERS) += broadcom/
1205 --- linux-3.10/drivers/char/Kconfig 2013-06-30 23:13:29.000000000 +0100
1206 +++ linux-rpi-3.10.y/drivers/char/Kconfig 2013-07-06 15:25:50.000000000 +0100
1209 source "drivers/s390/char/Kconfig"
1211 +source "drivers/char/broadcom/Kconfig"
1214 bool "Enable device interface for some SMD packet ports"
1216 diff -urN linux-3.10/drivers/misc/Kconfig linux-rpi-3.10.y/drivers/misc/Kconfig
1217 --- linux-3.10/drivers/misc/Kconfig 2013-06-30 23:13:29.000000000 +0100
1218 +++ linux-rpi-3.10.y/drivers/misc/Kconfig 2013-07-06 15:25:50.000000000 +0100
1220 source "drivers/misc/altera-stapl/Kconfig"
1221 source "drivers/misc/mei/Kconfig"
1222 source "drivers/misc/vmw_vmci/Kconfig"
1223 +source "drivers/misc/vc04_services/Kconfig"
1226 diff -urN linux-3.10/drivers/misc/Makefile linux-rpi-3.10.y/drivers/misc/Makefile
1227 --- linux-3.10/drivers/misc/Makefile 2013-06-30 23:13:29.000000000 +0100
1228 +++ linux-rpi-3.10.y/drivers/misc/Makefile 2013-07-06 15:25:50.000000000 +0100
1230 obj-$(CONFIG_VMWARE_VMCI) += vmw_vmci/
1231 obj-$(CONFIG_LATTICE_ECP3_CONFIG) += lattice-ecp3-config.o
1232 obj-$(CONFIG_SRAM) += sram.o
1233 +obj-$(CONFIG_BCM2708_VCHIQ) += vc04_services/
1234 diff -urN linux-3.10/drivers/misc/vc04_services/interface/vchi/connections/connection.h linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchi/connections/connection.h
1235 --- linux-3.10/drivers/misc/vc04_services/interface/vchi/connections/connection.h 1970-01-01 01:00:00.000000000 +0100
1236 +++ linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchi/connections/connection.h 2013-07-06 15:25:50.000000000 +0100
1239 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
1241 + * Redistribution and use in source and binary forms, with or without
1242 + * modification, are permitted provided that the following conditions
1244 + * 1. Redistributions of source code must retain the above copyright
1245 + * notice, this list of conditions, and the following disclaimer,
1246 + * without modification.
1247 + * 2. Redistributions in binary form must reproduce the above copyright
1248 + * notice, this list of conditions and the following disclaimer in the
1249 + * documentation and/or other materials provided with the distribution.
1250 + * 3. The names of the above-listed copyright holders may not be used
1251 + * to endorse or promote products derived from this software without
1252 + * specific prior written permission.
1254 + * ALTERNATIVELY, this software may be distributed under the terms of the
1255 + * GNU General Public License ("GPL") version 2, as published by the Free
1256 + * Software Foundation.
1258 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
1259 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
1260 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1261 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
1262 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
1263 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
1264 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
1265 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
1266 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
1267 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1268 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1271 +#ifndef CONNECTION_H_
1272 +#define CONNECTION_H_
1274 +#include <linux/kernel.h>
1275 +#include <linux/types.h>
1276 +#include <linux/semaphore.h>
1278 +#include "interface/vchi/vchi_cfg_internal.h"
1279 +#include "interface/vchi/vchi_common.h"
1280 +#include "interface/vchi/message_drivers/message.h"
1282 +/******************************************************************************
1284 + *****************************************************************************/
1286 +// Opaque handle for a connection / service pair
1287 +typedef struct opaque_vchi_connection_connected_service_handle_t *VCHI_CONNECTION_SERVICE_HANDLE_T;
1289 +// opaque handle to the connection state information
1290 +typedef struct opaque_vchi_connection_info_t VCHI_CONNECTION_STATE_T;
1292 +typedef struct vchi_connection_t VCHI_CONNECTION_T;
1295 +/******************************************************************************
1297 + *****************************************************************************/
1299 +// Routine to init a connection with a particular low level driver
1300 +typedef VCHI_CONNECTION_STATE_T * (*VCHI_CONNECTION_INIT_T)( struct vchi_connection_t * connection,
1301 + const VCHI_MESSAGE_DRIVER_T * driver );
1303 +// Routine to control CRC enabling at a connection level
1304 +typedef int32_t (*VCHI_CONNECTION_CRC_CONTROL_T)( VCHI_CONNECTION_STATE_T *state_handle,
1305 + VCHI_CRC_CONTROL_T control );
1307 +// Routine to create a service
1308 +typedef int32_t (*VCHI_CONNECTION_SERVICE_CONNECT_T)( VCHI_CONNECTION_STATE_T *state_handle,
1309 + int32_t service_id,
1310 + uint32_t rx_fifo_size,
1311 + uint32_t tx_fifo_size,
1313 + VCHI_CALLBACK_T callback,
1314 + void *callback_param,
1316 + int32_t want_unaligned_bulk_rx,
1317 + int32_t want_unaligned_bulk_tx,
1318 + VCHI_CONNECTION_SERVICE_HANDLE_T *service_handle );
1320 +// Routine to close a service
1321 +typedef int32_t (*VCHI_CONNECTION_SERVICE_DISCONNECT_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle );
1323 +// Routine to queue a message
1324 +typedef int32_t (*VCHI_CONNECTION_SERVICE_QUEUE_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
1326 + uint32_t data_size,
1327 + VCHI_FLAGS_T flags,
1328 + void *msg_handle );
1330 +// scatter-gather (vector) message queueing
1331 +typedef int32_t (*VCHI_CONNECTION_SERVICE_QUEUE_MESSAGEV_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
1332 + VCHI_MSG_VECTOR_T *vector,
1334 + VCHI_FLAGS_T flags,
1335 + void *msg_handle );
1337 +// Routine to dequeue a message
1338 +typedef int32_t (*VCHI_CONNECTION_SERVICE_DEQUEUE_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
1340 + uint32_t max_data_size_to_read,
1341 + uint32_t *actual_msg_size,
1342 + VCHI_FLAGS_T flags );
1344 +// Routine to peek at a message
1345 +typedef int32_t (*VCHI_CONNECTION_SERVICE_PEEK_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
1347 + uint32_t *msg_size,
1348 + VCHI_FLAGS_T flags );
1350 +// Routine to hold a message
1351 +typedef int32_t (*VCHI_CONNECTION_SERVICE_HOLD_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
1353 + uint32_t *msg_size,
1354 + VCHI_FLAGS_T flags,
1355 + void **message_handle );
1357 +// Routine to initialise a received message iterator
1358 +typedef int32_t (*VCHI_CONNECTION_SERVICE_LOOKAHEAD_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
1359 + VCHI_MSG_ITER_T *iter,
1360 + VCHI_FLAGS_T flags );
1362 +// Routine to release a held message
1363 +typedef int32_t (*VCHI_CONNECTION_HELD_MSG_RELEASE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
1364 + void *message_handle );
1366 +// Routine to get info on a held message
1367 +typedef int32_t (*VCHI_CONNECTION_HELD_MSG_INFO_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
1368 + void *message_handle,
1370 + int32_t *msg_size,
1371 + uint32_t *tx_timestamp,
1372 + uint32_t *rx_timestamp );
1374 +// Routine to check whether the iterator has a next message
1375 +typedef int32_t (*VCHI_CONNECTION_MSG_ITER_HAS_NEXT_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service,
1376 + const VCHI_MSG_ITER_T *iter );
1378 +// Routine to advance the iterator
1379 +typedef int32_t (*VCHI_CONNECTION_MSG_ITER_NEXT_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service,
1380 + VCHI_MSG_ITER_T *iter,
1382 + uint32_t *msg_size );
1384 +// Routine to remove the last message returned by the iterator
1385 +typedef int32_t (*VCHI_CONNECTION_MSG_ITER_REMOVE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service,
1386 + VCHI_MSG_ITER_T *iter );
1388 +// Routine to hold the last message returned by the iterator
1389 +typedef int32_t (*VCHI_CONNECTION_MSG_ITER_HOLD_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service,
1390 + VCHI_MSG_ITER_T *iter,
1391 + void **msg_handle );
1393 +// Routine to transmit bulk data
1394 +typedef int32_t (*VCHI_CONNECTION_BULK_QUEUE_TRANSMIT_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
1395 + const void *data_src,
1396 + uint32_t data_size,
1397 + VCHI_FLAGS_T flags,
1398 + void *bulk_handle );
1400 +// Routine to receive data
1401 +typedef int32_t (*VCHI_CONNECTION_BULK_QUEUE_RECEIVE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
1403 + uint32_t data_size,
1404 + VCHI_FLAGS_T flags,
1405 + void *bulk_handle );
1407 +// Routine to report if a server is available
1408 +typedef int32_t (*VCHI_CONNECTION_SERVER_PRESENT)( VCHI_CONNECTION_STATE_T *state, int32_t service_id, int32_t peer_flags );
1410 +// Routine to report the number of RX slots available
1411 +typedef int (*VCHI_CONNECTION_RX_SLOTS_AVAILABLE)( const VCHI_CONNECTION_STATE_T *state );
1413 +// Routine to report the RX slot size
1414 +typedef uint32_t (*VCHI_CONNECTION_RX_SLOT_SIZE)( const VCHI_CONNECTION_STATE_T *state );
1416 +// Callback to indicate that the other side has added a buffer to the rx bulk DMA FIFO
1417 +typedef void (*VCHI_CONNECTION_RX_BULK_BUFFER_ADDED)(VCHI_CONNECTION_STATE_T *state,
1420 + MESSAGE_TX_CHANNEL_T channel,
1421 + uint32_t channel_params,
1422 + uint32_t data_length,
1423 + uint32_t data_offset);
1425 +// Callback to inform a service that a Xon or Xoff message has been received
1426 +typedef void (*VCHI_CONNECTION_FLOW_CONTROL)(VCHI_CONNECTION_STATE_T *state, int32_t service_id, int32_t xoff);
1428 +// Callback to inform a service that a server available reply message has been received
1429 +typedef void (*VCHI_CONNECTION_SERVER_AVAILABLE_REPLY)(VCHI_CONNECTION_STATE_T *state, int32_t service_id, uint32_t flags);
1431 +// Callback to indicate that bulk auxiliary messages have arrived
1432 +typedef void (*VCHI_CONNECTION_BULK_AUX_RECEIVED)(VCHI_CONNECTION_STATE_T *state);
1434 +// Callback to indicate that bulk auxiliary messages have arrived
1435 +typedef void (*VCHI_CONNECTION_BULK_AUX_TRANSMITTED)(VCHI_CONNECTION_STATE_T *state, void *handle);
1437 +// Callback with all the connection info you require
1438 +typedef void (*VCHI_CONNECTION_INFO)(VCHI_CONNECTION_STATE_T *state, uint32_t protocol_version, uint32_t slot_size, uint32_t num_slots, uint32_t min_bulk_size);
1440 +// Callback to inform of a disconnect
1441 +typedef void (*VCHI_CONNECTION_DISCONNECT)(VCHI_CONNECTION_STATE_T *state, uint32_t flags);
1443 +// Callback to inform of a power control request
1444 +typedef void (*VCHI_CONNECTION_POWER_CONTROL)(VCHI_CONNECTION_STATE_T *state, MESSAGE_TX_CHANNEL_T channel, int32_t enable);
1446 +// allocate memory suitably aligned for this connection
1447 +typedef void * (*VCHI_BUFFER_ALLOCATE)(VCHI_CONNECTION_SERVICE_HANDLE_T service_handle, uint32_t * length);
1449 +// free memory allocated by buffer_allocate
1450 +typedef void (*VCHI_BUFFER_FREE)(VCHI_CONNECTION_SERVICE_HANDLE_T service_handle, void * address);
1453 +/******************************************************************************
1454 + System driver struct
1455 + *****************************************************************************/
1457 +struct opaque_vchi_connection_api_t
1459 + // Routine to init the connection
1460 + VCHI_CONNECTION_INIT_T init;
1462 + // Connection-level CRC control
1463 + VCHI_CONNECTION_CRC_CONTROL_T crc_control;
1465 + // Routine to connect to or create service
1466 + VCHI_CONNECTION_SERVICE_CONNECT_T service_connect;
1468 + // Routine to disconnect from a service
1469 + VCHI_CONNECTION_SERVICE_DISCONNECT_T service_disconnect;
1471 + // Routine to queue a message
1472 + VCHI_CONNECTION_SERVICE_QUEUE_MESSAGE_T service_queue_msg;
1474 + // scatter-gather (vector) message queue
1475 + VCHI_CONNECTION_SERVICE_QUEUE_MESSAGEV_T service_queue_msgv;
1477 + // Routine to dequeue a message
1478 + VCHI_CONNECTION_SERVICE_DEQUEUE_MESSAGE_T service_dequeue_msg;
1480 + // Routine to peek at a message
1481 + VCHI_CONNECTION_SERVICE_PEEK_MESSAGE_T service_peek_msg;
1483 + // Routine to hold a message
1484 + VCHI_CONNECTION_SERVICE_HOLD_MESSAGE_T service_hold_msg;
1486 + // Routine to initialise a received message iterator
1487 + VCHI_CONNECTION_SERVICE_LOOKAHEAD_MESSAGE_T service_look_ahead_msg;
1489 + // Routine to release a message
1490 + VCHI_CONNECTION_HELD_MSG_RELEASE_T held_msg_release;
1492 + // Routine to get information on a held message
1493 + VCHI_CONNECTION_HELD_MSG_INFO_T held_msg_info;
1495 + // Routine to check for next message on iterator
1496 + VCHI_CONNECTION_MSG_ITER_HAS_NEXT_T msg_iter_has_next;
1498 + // Routine to get next message on iterator
1499 + VCHI_CONNECTION_MSG_ITER_NEXT_T msg_iter_next;
1501 + // Routine to remove the last message returned by iterator
1502 + VCHI_CONNECTION_MSG_ITER_REMOVE_T msg_iter_remove;
1504 + // Routine to hold the last message returned by iterator
1505 + VCHI_CONNECTION_MSG_ITER_HOLD_T msg_iter_hold;
1507 + // Routine to transmit bulk data
1508 + VCHI_CONNECTION_BULK_QUEUE_TRANSMIT_T bulk_queue_transmit;
1510 + // Routine to receive data
1511 + VCHI_CONNECTION_BULK_QUEUE_RECEIVE_T bulk_queue_receive;
1513 + // Routine to report the available servers
1514 + VCHI_CONNECTION_SERVER_PRESENT server_present;
1516 + // Routine to report the number of RX slots available
1517 + VCHI_CONNECTION_RX_SLOTS_AVAILABLE connection_rx_slots_available;
1519 + // Routine to report the RX slot size
1520 + VCHI_CONNECTION_RX_SLOT_SIZE connection_rx_slot_size;
1522 + // Callback to indicate that the other side has added a buffer to the rx bulk DMA FIFO
1523 + VCHI_CONNECTION_RX_BULK_BUFFER_ADDED rx_bulk_buffer_added;
1525 + // Callback to inform a service that a Xon or Xoff message has been received
1526 + VCHI_CONNECTION_FLOW_CONTROL flow_control;
1528 + // Callback to inform a service that a server available reply message has been received
1529 + VCHI_CONNECTION_SERVER_AVAILABLE_REPLY server_available_reply;
1531 + // Callback to indicate that bulk auxiliary messages have arrived
1532 + VCHI_CONNECTION_BULK_AUX_RECEIVED bulk_aux_received;
1534 + // Callback to indicate that a bulk auxiliary message has been transmitted
1535 + VCHI_CONNECTION_BULK_AUX_TRANSMITTED bulk_aux_transmitted;
1537 + // Callback to provide information about the connection
1538 + VCHI_CONNECTION_INFO connection_info;
1540 + // Callback to notify that peer has requested disconnect
1541 + VCHI_CONNECTION_DISCONNECT disconnect;
1543 + // Callback to notify that peer has requested power change
1544 + VCHI_CONNECTION_POWER_CONTROL power_control;
1546 + // allocate memory suitably aligned for this connection
1547 + VCHI_BUFFER_ALLOCATE buffer_allocate;
1549 + // free memory allocated by buffer_allocate
1550 + VCHI_BUFFER_FREE buffer_free;
1554 +struct vchi_connection_t {
1555 + const VCHI_CONNECTION_API_T *api;
1556 + VCHI_CONNECTION_STATE_T *state;
1557 +#ifdef VCHI_COARSE_LOCKING
1558 + struct semaphore sem;
1563 +#endif /* CONNECTION_H_ */
1565 +/****************************** End of file **********************************/
1566 diff -urN linux-3.10/drivers/misc/vc04_services/interface/vchi/message_drivers/message.h linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchi/message_drivers/message.h
1567 --- linux-3.10/drivers/misc/vc04_services/interface/vchi/message_drivers/message.h 1970-01-01 01:00:00.000000000 +0100
1568 +++ linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchi/message_drivers/message.h 2013-07-06 15:25:50.000000000 +0100
1571 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
1573 + * Redistribution and use in source and binary forms, with or without
1574 + * modification, are permitted provided that the following conditions
1576 + * 1. Redistributions of source code must retain the above copyright
1577 + * notice, this list of conditions, and the following disclaimer,
1578 + * without modification.
1579 + * 2. Redistributions in binary form must reproduce the above copyright
1580 + * notice, this list of conditions and the following disclaimer in the
1581 + * documentation and/or other materials provided with the distribution.
1582 + * 3. The names of the above-listed copyright holders may not be used
1583 + * to endorse or promote products derived from this software without
1584 + * specific prior written permission.
1586 + * ALTERNATIVELY, this software may be distributed under the terms of the
1587 + * GNU General Public License ("GPL") version 2, as published by the Free
1588 + * Software Foundation.
1590 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
1591 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
1592 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1593 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
1594 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
1595 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
1596 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
1597 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
1598 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
1599 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1600 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1603 +#ifndef _VCHI_MESSAGE_H_
1604 +#define _VCHI_MESSAGE_H_
1606 +#include <linux/kernel.h>
1607 +#include <linux/types.h>
1608 +#include <linux/semaphore.h>
1610 +#include "interface/vchi/vchi_cfg_internal.h"
1611 +#include "interface/vchi/vchi_common.h"
1614 +typedef enum message_event_type {
1615 + MESSAGE_EVENT_NONE,
1616 + MESSAGE_EVENT_NOP,
1617 + MESSAGE_EVENT_MESSAGE,
1618 + MESSAGE_EVENT_SLOT_COMPLETE,
1619 + MESSAGE_EVENT_RX_BULK_PAUSED,
1620 + MESSAGE_EVENT_RX_BULK_COMPLETE,
1621 + MESSAGE_EVENT_TX_COMPLETE,
1622 + MESSAGE_EVENT_MSG_DISCARDED
1623 +} MESSAGE_EVENT_TYPE_T;
1625 +typedef enum vchi_msg_flags
1627 + VCHI_MSG_FLAGS_NONE = 0x0,
1628 + VCHI_MSG_FLAGS_TERMINATE_DMA = 0x1
1629 +} VCHI_MSG_FLAGS_T;
1631 +typedef enum message_tx_channel
1633 + MESSAGE_TX_CHANNEL_MESSAGE = 0,
1634 + MESSAGE_TX_CHANNEL_BULK = 1 // drivers may provide multiple bulk channels, from 1 upwards
1635 +} MESSAGE_TX_CHANNEL_T;
1637 +// Macros used for cycling through bulk channels
1638 +#define MESSAGE_TX_CHANNEL_BULK_PREV(c) (MESSAGE_TX_CHANNEL_BULK+((c)-MESSAGE_TX_CHANNEL_BULK+VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION-1)%VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION)
1639 +#define MESSAGE_TX_CHANNEL_BULK_NEXT(c) (MESSAGE_TX_CHANNEL_BULK+((c)-MESSAGE_TX_CHANNEL_BULK+1)%VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION)
1641 +typedef enum message_rx_channel
1643 + MESSAGE_RX_CHANNEL_MESSAGE = 0,
1644 + MESSAGE_RX_CHANNEL_BULK = 1 // drivers may provide multiple bulk channels, from 1 upwards
1645 +} MESSAGE_RX_CHANNEL_T;
1647 +// Message receive slot information
1648 +typedef struct rx_msg_slot_info {
1650 + struct rx_msg_slot_info *next;
1651 + //struct slot_info *prev;
1652 +#if !defined VCHI_COARSE_LOCKING
1653 + struct semaphore sem;
1656 + uint8_t *addr; // base address of slot
1657 + uint32_t len; // length of slot in bytes
1659 + uint32_t write_ptr; // hardware causes this to advance
1660 + uint32_t read_ptr; // this module does the reading
1661 + int active; // is this slot in the hardware dma fifo?
1662 + uint32_t msgs_parsed; // count how many messages are in this slot
1663 + uint32_t msgs_released; // how many messages have been released
1664 + void *state; // connection state information
1665 + uint8_t ref_count[VCHI_MAX_SERVICES_PER_CONNECTION]; // reference count for slots held by services
1666 +} RX_MSG_SLOTINFO_T;
1668 +// The message driver no longer needs to know about the fields of RX_BULK_SLOTINFO_T - sort this out.
1669 +// In particular, it mustn't use addr and len - they're the client buffer, but the message
1670 +// driver will be tasked with sending the aligned core section.
1671 +typedef struct rx_bulk_slotinfo_t {
1672 + struct rx_bulk_slotinfo_t *next;
1674 + struct semaphore *blocking;
1680 + // needed for the callback
1683 + VCHI_FLAGS_T flags;
1684 +} RX_BULK_SLOTINFO_T;
1687 +/* ----------------------------------------------------------------------
1688 + * each connection driver will have a pool of the following struct.
1690 + * the pool will be managed by vchi_qman_*
1691 + * this means there will be multiple queues (single linked lists)
1692 + * a given struct message_info will be on exactly one of these queues
1694 + * -------------------------------------------------------------------- */
1695 +typedef struct rx_message_info {
1697 + struct message_info *next;
1698 + //struct message_info *prev;
1702 + RX_MSG_SLOTINFO_T *slot; // points to whichever slot contains this message
1703 + uint32_t tx_timestamp;
1704 + uint32_t rx_timestamp;
1706 +} RX_MESSAGE_INFO_T;
1709 + MESSAGE_EVENT_TYPE_T type;
1713 + void *addr; // address of message
1714 + uint16_t slot_delta; // whether this message indicated slot delta
1715 + uint32_t len; // length of message
1716 + RX_MSG_SLOTINFO_T *slot; // slot this message is in
1717 + int32_t service; // service id this message is destined for
1718 + uint32_t tx_timestamp; // timestamp from the header
1719 + uint32_t rx_timestamp; // timestamp when we parsed it
1722 + // FIXME: cleanup slot reporting...
1723 + RX_MSG_SLOTINFO_T *rx_msg;
1724 + RX_BULK_SLOTINFO_T *rx_bulk;
1726 + MESSAGE_TX_CHANNEL_T tx_channel;
1732 +typedef void VCHI_MESSAGE_DRIVER_EVENT_CALLBACK_T( void *state );
1735 + VCHI_MESSAGE_DRIVER_EVENT_CALLBACK_T *event_callback;
1736 +} VCHI_MESSAGE_DRIVER_OPEN_T;
1739 +// handle to this instance of message driver (as returned by ->open)
1740 +typedef struct opaque_mhandle_t *VCHI_MDRIVER_HANDLE_T;
1742 +struct opaque_vchi_message_driver_t {
1743 + VCHI_MDRIVER_HANDLE_T *(*open)( VCHI_MESSAGE_DRIVER_OPEN_T *params, void *state );
1744 + int32_t (*suspending)( VCHI_MDRIVER_HANDLE_T *handle );
1745 + int32_t (*resumed)( VCHI_MDRIVER_HANDLE_T *handle );
1746 + int32_t (*power_control)( VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T, int32_t enable );
1747 + int32_t (*add_msg_rx_slot)( VCHI_MDRIVER_HANDLE_T *handle, RX_MSG_SLOTINFO_T *slot ); // rx message
1748 + int32_t (*add_bulk_rx)( VCHI_MDRIVER_HANDLE_T *handle, void *data, uint32_t len, RX_BULK_SLOTINFO_T *slot ); // rx data (bulk)
1749 + int32_t (*send)( VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel, const void *data, uint32_t len, VCHI_MSG_FLAGS_T flags, void *send_handle ); // tx (message & bulk)
1750 + void (*next_event)( VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_EVENT_T *event ); // get the next event from message_driver
1751 + int32_t (*enable)( VCHI_MDRIVER_HANDLE_T *handle );
1752 + int32_t (*form_message)( VCHI_MDRIVER_HANDLE_T *handle, int32_t service_id, VCHI_MSG_VECTOR_T *vector, uint32_t count, void
1753 + *address, uint32_t length_avail, uint32_t max_total_length, int32_t pad_to_fill, int32_t allow_partial );
1755 + int32_t (*update_message)( VCHI_MDRIVER_HANDLE_T *handle, void *dest, int16_t *slot_count );
1756 + int32_t (*buffer_aligned)( VCHI_MDRIVER_HANDLE_T *handle, int tx, int uncached, const void *address, const uint32_t length );
1757 + void * (*allocate_buffer)( VCHI_MDRIVER_HANDLE_T *handle, uint32_t *length );
1758 + void (*free_buffer)( VCHI_MDRIVER_HANDLE_T *handle, void *address );
1759 + int (*rx_slot_size)( VCHI_MDRIVER_HANDLE_T *handle, int msg_size );
1760 + int (*tx_slot_size)( VCHI_MDRIVER_HANDLE_T *handle, int msg_size );
1762 + int32_t (*tx_supports_terminate)( const VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel );
1763 + uint32_t (*tx_bulk_chunk_size)( const VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel );
1764 + int (*tx_alignment)( const VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel );
1765 + int (*rx_alignment)( const VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_RX_CHANNEL_T channel );
1766 + void (*form_bulk_aux)( VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel, const void *data, uint32_t len, uint32_t chunk_size, const void **aux_data, int32_t *aux_len );
1767 + void (*debug)( VCHI_MDRIVER_HANDLE_T *handle );
1771 +#endif // _VCHI_MESSAGE_H_
1773 +/****************************** End of file ***********************************/
1774 diff -urN linux-3.10/drivers/misc/vc04_services/interface/vchi/vchi_cfg.h linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchi/vchi_cfg.h
1775 --- linux-3.10/drivers/misc/vc04_services/interface/vchi/vchi_cfg.h 1970-01-01 01:00:00.000000000 +0100
1776 +++ linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchi/vchi_cfg.h 2013-07-06 15:25:50.000000000 +0100
1779 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
1781 + * Redistribution and use in source and binary forms, with or without
1782 + * modification, are permitted provided that the following conditions
1784 + * 1. Redistributions of source code must retain the above copyright
1785 + * notice, this list of conditions, and the following disclaimer,
1786 + * without modification.
1787 + * 2. Redistributions in binary form must reproduce the above copyright
1788 + * notice, this list of conditions and the following disclaimer in the
1789 + * documentation and/or other materials provided with the distribution.
1790 + * 3. The names of the above-listed copyright holders may not be used
1791 + * to endorse or promote products derived from this software without
1792 + * specific prior written permission.
1794 + * ALTERNATIVELY, this software may be distributed under the terms of the
1795 + * GNU General Public License ("GPL") version 2, as published by the Free
1796 + * Software Foundation.
1798 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
1799 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
1800 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1801 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
1802 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
1803 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
1804 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
1805 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
1806 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
1807 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1808 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1811 +#ifndef VCHI_CFG_H_
1812 +#define VCHI_CFG_H_
1814 +/****************************************************************************************
1815 + * Defines in this first section are part of the VCHI API and may be examined by VCHI
1817 + ***************************************************************************************/
1819 +/* Required alignment of base addresses for bulk transfer, if unaligned transfers are not enabled */
1820 +/* Really determined by the message driver, and should be available from a run-time call. */
1821 +#ifndef VCHI_BULK_ALIGN
1822 +# if __VCCOREVER__ >= 0x04000000
1823 +# define VCHI_BULK_ALIGN 32 // Allows for the need to do cache cleans
1825 +# define VCHI_BULK_ALIGN 16
1829 +/* Required length multiple for bulk transfers, if unaligned transfers are not enabled */
1830 +/* May be less than or greater than VCHI_BULK_ALIGN */
1831 +/* Really determined by the message driver, and should be available from a run-time call. */
1832 +#ifndef VCHI_BULK_GRANULARITY
1833 +# if __VCCOREVER__ >= 0x04000000
1834 +# define VCHI_BULK_GRANULARITY 32 // Allows for the need to do cache cleans
1836 +# define VCHI_BULK_GRANULARITY 16
1840 +/* The largest possible message to be queued with vchi_msg_queue. */
1841 +#ifndef VCHI_MAX_MSG_SIZE
1842 +# if defined VCHI_LOCAL_HOST_PORT
1843 +# define VCHI_MAX_MSG_SIZE 16384 // makes file transfers fast, but should they be using bulk?
1845 +# define VCHI_MAX_MSG_SIZE 4096 // NOTE: THIS MUST BE LARGER THAN OR EQUAL TO THE SIZE OF THE KHRONOS MERGE BUFFER!!
1849 +/******************************************************************************************
1850 + * Defines below are system configuration options, and should not be used by VCHI services.
1851 + *****************************************************************************************/
1853 +/* How many connections can we support? A localhost implementation uses 2 connections,
1854 + * 1 for host-app, 1 for VMCS, and these are hooked together by a loopback MPHI VCFW
1856 +#ifndef VCHI_MAX_NUM_CONNECTIONS
1857 +# define VCHI_MAX_NUM_CONNECTIONS 3
1860 +/* How many services can we open per connection? Extending this doesn't cost processing time, just a small
1861 + * amount of static memory. */
1862 +#ifndef VCHI_MAX_SERVICES_PER_CONNECTION
1863 +# define VCHI_MAX_SERVICES_PER_CONNECTION 36
1866 +/* Adjust if using a message driver that supports more logical TX channels */
1867 +#ifndef VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION
1868 +# define VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION 9 // 1 MPHI + 8 CCP2 logical channels
1871 +/* Adjust if using a message driver that supports more logical RX channels */
1872 +#ifndef VCHI_MAX_BULK_RX_CHANNELS_PER_CONNECTION
1873 +# define VCHI_MAX_BULK_RX_CHANNELS_PER_CONNECTION 1 // 1 MPHI
1876 +/* How many receive slots do we use. This times VCHI_MAX_MSG_SIZE gives the effective
1877 + * receive queue space, less message headers. */
1878 +#ifndef VCHI_NUM_READ_SLOTS
1879 +# if defined(VCHI_LOCAL_HOST_PORT)
1880 +# define VCHI_NUM_READ_SLOTS 4
1882 +# define VCHI_NUM_READ_SLOTS 48
1886 +/* Do we utilise overrun facility for receive message slots? Can aid peer transmit
1887 + * performance. Only define on VideoCore end, talking to host.
1889 +//#define VCHI_MSG_RX_OVERRUN
1891 +/* How many transmit slots do we use. Generally don't need many, as the hardware driver
1892 + * underneath VCHI will usually have its own buffering. */
1893 +#ifndef VCHI_NUM_WRITE_SLOTS
1894 +# define VCHI_NUM_WRITE_SLOTS 4
1897 +/* If a service has held or queued received messages in VCHI_XOFF_THRESHOLD or more slots,
1898 + * then it's taking up too much buffer space, and the peer service will be told to stop
1899 + * transmitting with an XOFF message. For this to be effective, the VCHI_NUM_READ_SLOTS
1900 + * needs to be considerably bigger than VCHI_NUM_WRITE_SLOTS, or the transmit latency
1902 +#ifndef VCHI_XOFF_THRESHOLD
1903 +# define VCHI_XOFF_THRESHOLD (VCHI_NUM_READ_SLOTS / 2)
1906 +/* After we've sent an XOFF, the peer will be told to resume transmission once the local
1907 + * service has dequeued/released enough messages that it's now occupying
1908 + * VCHI_XON_THRESHOLD slots or fewer. */
1909 +#ifndef VCHI_XON_THRESHOLD
1910 +# define VCHI_XON_THRESHOLD (VCHI_NUM_READ_SLOTS / 4)
1913 +/* A size below which a bulk transfer omits the handshake completely and always goes
1914 + * via the message channel, if bulk auxiliary is being sent on that service. (The user
1915 + * can guarantee this by enabling unaligned transmits).
1917 +#ifndef VCHI_MIN_BULK_SIZE
1918 +# define VCHI_MIN_BULK_SIZE ( VCHI_MAX_MSG_SIZE / 2 < 4096 ? VCHI_MAX_MSG_SIZE / 2 : 4096 )
1921 +/* Maximum size of bulk transmission chunks, for each interface type. A trade-off between
1922 + * speed and latency; the smaller the chunk size the better change of messages and other
1923 + * bulk transmissions getting in when big bulk transfers are happening. Set to 0 to not
1924 + * break transmissions into chunks.
1926 +#ifndef VCHI_MAX_BULK_CHUNK_SIZE_MPHI
1927 +# define VCHI_MAX_BULK_CHUNK_SIZE_MPHI (16 * 1024)
1930 +/* NB Chunked CCP2 transmissions violate the letter of the CCP2 spec by using "JPEG8" mode
1931 + * with multiple-line frames. Only use if the receiver can cope. */
1932 +#ifndef VCHI_MAX_BULK_CHUNK_SIZE_CCP2
1933 +# define VCHI_MAX_BULK_CHUNK_SIZE_CCP2 0
1936 +/* How many TX messages can we have pending in our transmit slots. Once exhausted,
1937 + * vchi_msg_queue will be blocked. */
1938 +#ifndef VCHI_TX_MSG_QUEUE_SIZE
1939 +# define VCHI_TX_MSG_QUEUE_SIZE 256
1942 +/* How many RX messages can we have parsed in the receive slots. Once exhausted, parsing
1943 + * will be suspended until older messages are dequeued/released. */
1944 +#ifndef VCHI_RX_MSG_QUEUE_SIZE
1945 +# define VCHI_RX_MSG_QUEUE_SIZE 256
1948 +/* Really should be able to cope if we run out of received message descriptors, by
1949 + * suspending parsing as the comment above says, but we don't. This sweeps the issue
1950 + * under the carpet. */
1951 +#if VCHI_RX_MSG_QUEUE_SIZE < (VCHI_MAX_MSG_SIZE/16 + 1) * VCHI_NUM_READ_SLOTS
1952 +# undef VCHI_RX_MSG_QUEUE_SIZE
1953 +# define VCHI_RX_MSG_QUEUE_SIZE (VCHI_MAX_MSG_SIZE/16 + 1) * VCHI_NUM_READ_SLOTS
1956 +/* How many bulk transmits can we have pending. Once exhausted, vchi_bulk_queue_transmit
1957 + * will be blocked. */
1958 +#ifndef VCHI_TX_BULK_QUEUE_SIZE
1959 +# define VCHI_TX_BULK_QUEUE_SIZE 64
1962 +/* How many bulk receives can we have pending. Once exhausted, vchi_bulk_queue_receive
1963 + * will be blocked. */
1964 +#ifndef VCHI_RX_BULK_QUEUE_SIZE
1965 +# define VCHI_RX_BULK_QUEUE_SIZE 64
1968 +/* A limit on how many outstanding bulk requests we expect the peer to give us. If
1969 + * the peer asks for more than this, VCHI will fail and assert. The number is determined
1970 + * by the peer's hardware - it's the number of outstanding requests that can be queued
1971 + * on all bulk channels. VC3's MPHI peripheral allows 16. */
1972 +#ifndef VCHI_MAX_PEER_BULK_REQUESTS
1973 +# define VCHI_MAX_PEER_BULK_REQUESTS 32
1976 +/* Define VCHI_CCP2TX_MANUAL_POWER if the host tells us when to turn the CCP2
1977 + * transmitter on and off.
1979 +/*#define VCHI_CCP2TX_MANUAL_POWER*/
1981 +#ifndef VCHI_CCP2TX_MANUAL_POWER
1983 +/* Timeout (in milliseconds) for putting the CCP2TX interface into IDLE state. Set
1984 + * negative for no IDLE.
1986 +# ifndef VCHI_CCP2TX_IDLE_TIMEOUT
1987 +# define VCHI_CCP2TX_IDLE_TIMEOUT 5
1990 +/* Timeout (in milliseconds) for putting the CCP2TX interface into OFF state. Set
1991 + * negative for no OFF.
1993 +# ifndef VCHI_CCP2TX_OFF_TIMEOUT
1994 +# define VCHI_CCP2TX_OFF_TIMEOUT 1000
1997 +#endif /* VCHI_CCP2TX_MANUAL_POWER */
1999 +#endif /* VCHI_CFG_H_ */
2001 +/****************************** End of file **********************************/
2002 diff -urN linux-3.10/drivers/misc/vc04_services/interface/vchi/vchi_cfg_internal.h linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchi/vchi_cfg_internal.h
2003 --- linux-3.10/drivers/misc/vc04_services/interface/vchi/vchi_cfg_internal.h 1970-01-01 01:00:00.000000000 +0100
2004 +++ linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchi/vchi_cfg_internal.h 2013-07-06 15:25:50.000000000 +0100
2007 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
2009 + * Redistribution and use in source and binary forms, with or without
2010 + * modification, are permitted provided that the following conditions
2012 + * 1. Redistributions of source code must retain the above copyright
2013 + * notice, this list of conditions, and the following disclaimer,
2014 + * without modification.
2015 + * 2. Redistributions in binary form must reproduce the above copyright
2016 + * notice, this list of conditions and the following disclaimer in the
2017 + * documentation and/or other materials provided with the distribution.
2018 + * 3. The names of the above-listed copyright holders may not be used
2019 + * to endorse or promote products derived from this software without
2020 + * specific prior written permission.
2022 + * ALTERNATIVELY, this software may be distributed under the terms of the
2023 + * GNU General Public License ("GPL") version 2, as published by the Free
2024 + * Software Foundation.
2026 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
2027 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
2028 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
2029 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
2030 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
2031 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
2032 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
2033 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
2034 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
2035 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
2036 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
2039 +#ifndef VCHI_CFG_INTERNAL_H_
2040 +#define VCHI_CFG_INTERNAL_H_
2042 +/****************************************************************************************
2043 + * Control optimisation attempts.
2044 + ***************************************************************************************/
2046 +// Don't use lots of short-term locks - use great long ones, reducing the overall locks-per-second
2047 +#define VCHI_COARSE_LOCKING
2049 +// Avoid lock then unlock on exit from blocking queue operations (msg tx, bulk rx/tx)
2050 +// (only relevant if VCHI_COARSE_LOCKING)
2051 +#define VCHI_ELIDE_BLOCK_EXIT_LOCK
2053 +// Avoid lock on non-blocking peek
2054 +// (only relevant if VCHI_COARSE_LOCKING)
2055 +#define VCHI_AVOID_PEEK_LOCK
2057 +// Use one slot-handler thread per connection, rather than 1 thread dealing with all connections in rotation.
2058 +#define VCHI_MULTIPLE_HANDLER_THREADS
2060 +// Put free descriptors onto the head of the free queue, rather than the tail, so that we don't thrash
2061 +// our way through the pool of descriptors.
2062 +#define VCHI_PUSH_FREE_DESCRIPTORS_ONTO_HEAD
2064 +// Don't issue a MSG_AVAILABLE callback for every single message. Possibly only safe if VCHI_COARSE_LOCKING.
2065 +#define VCHI_FEWER_MSG_AVAILABLE_CALLBACKS
2067 +// Don't use message descriptors for TX messages that don't need them
2068 +#define VCHI_MINIMISE_TX_MSG_DESCRIPTORS
2070 +// Nano-locks for multiqueue
2071 +//#define VCHI_MQUEUE_NANOLOCKS
2073 +// Lock-free(er) dequeuing
2074 +//#define VCHI_RX_NANOLOCKS
2076 +#endif /*VCHI_CFG_INTERNAL_H_*/
2077 diff -urN linux-3.10/drivers/misc/vc04_services/interface/vchi/vchi_common.h linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchi/vchi_common.h
2078 --- linux-3.10/drivers/misc/vc04_services/interface/vchi/vchi_common.h 1970-01-01 01:00:00.000000000 +0100
2079 +++ linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchi/vchi_common.h 2013-07-06 15:25:50.000000000 +0100
2082 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
2084 + * Redistribution and use in source and binary forms, with or without
2085 + * modification, are permitted provided that the following conditions
2087 + * 1. Redistributions of source code must retain the above copyright
2088 + * notice, this list of conditions, and the following disclaimer,
2089 + * without modification.
2090 + * 2. Redistributions in binary form must reproduce the above copyright
2091 + * notice, this list of conditions and the following disclaimer in the
2092 + * documentation and/or other materials provided with the distribution.
2093 + * 3. The names of the above-listed copyright holders may not be used
2094 + * to endorse or promote products derived from this software without
2095 + * specific prior written permission.
2097 + * ALTERNATIVELY, this software may be distributed under the terms of the
2098 + * GNU General Public License ("GPL") version 2, as published by the Free
2099 + * Software Foundation.
2101 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
2102 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
2103 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
2104 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
2105 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
2106 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
2107 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
2108 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
2109 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
2110 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
2111 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
2114 +#ifndef VCHI_COMMON_H_
2115 +#define VCHI_COMMON_H_
2118 +//flags used when sending messages (must be bitmapped)
2121 + VCHI_FLAGS_NONE = 0x0,
2122 + VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE = 0x1, // waits for message to be received, or sent (NB. not the same as being seen on other side)
2123 + VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE = 0x2, // run a callback when message sent
2124 + VCHI_FLAGS_BLOCK_UNTIL_QUEUED = 0x4, // return once the transfer is in a queue ready to go
2125 + VCHI_FLAGS_ALLOW_PARTIAL = 0x8,
2126 + VCHI_FLAGS_BLOCK_UNTIL_DATA_READ = 0x10,
2127 + VCHI_FLAGS_CALLBACK_WHEN_DATA_READ = 0x20,
2129 + VCHI_FLAGS_ALIGN_SLOT = 0x000080, // internal use only
2130 + VCHI_FLAGS_BULK_AUX_QUEUED = 0x010000, // internal use only
2131 + VCHI_FLAGS_BULK_AUX_COMPLETE = 0x020000, // internal use only
2132 + VCHI_FLAGS_BULK_DATA_QUEUED = 0x040000, // internal use only
2133 + VCHI_FLAGS_BULK_DATA_COMPLETE = 0x080000, // internal use only
2134 + VCHI_FLAGS_INTERNAL = 0xFF0000
2137 +// constants for vchi_crc_control()
2139 + VCHI_CRC_NOTHING = -1,
2140 + VCHI_CRC_PER_SERVICE = 0,
2141 + VCHI_CRC_EVERYTHING = 1,
2142 +} VCHI_CRC_CONTROL_T;
2144 +//callback reasons when an event occurs on a service
2147 + VCHI_CALLBACK_REASON_MIN,
2149 + //This indicates that there is data available
2150 + //handle is the msg id that was transmitted with the data
2151 + // When a message is received and there was no FULL message available previously, send callback
2152 + // Tasks get kicked by the callback, reset their event and try and read from the fifo until it fails
2153 + VCHI_CALLBACK_MSG_AVAILABLE,
2154 + VCHI_CALLBACK_MSG_SENT,
2155 + VCHI_CALLBACK_MSG_SPACE_AVAILABLE, // XXX not yet implemented
2157 + // This indicates that a transfer from the other side has completed
2158 + VCHI_CALLBACK_BULK_RECEIVED,
2159 + //This indicates that data queued up to be sent has now gone
2160 + //handle is the msg id that was used when sending the data
2161 + VCHI_CALLBACK_BULK_SENT,
2162 + VCHI_CALLBACK_BULK_RX_SPACE_AVAILABLE, // XXX not yet implemented
2163 + VCHI_CALLBACK_BULK_TX_SPACE_AVAILABLE, // XXX not yet implemented
2165 + VCHI_CALLBACK_SERVICE_CLOSED,
2167 + // this side has sent XOFF to peer due to lack of data consumption by service
2168 + // (suggests the service may need to take some recovery action if it has
2169 + // been deliberately holding off consuming data)
2170 + VCHI_CALLBACK_SENT_XOFF,
2171 + VCHI_CALLBACK_SENT_XON,
2173 + // indicates that a bulk transfer has finished reading the source buffer
2174 + VCHI_CALLBACK_BULK_DATA_READ,
2176 + // power notification events (currently host side only)
2177 + VCHI_CALLBACK_PEER_OFF,
2178 + VCHI_CALLBACK_PEER_SUSPENDED,
2179 + VCHI_CALLBACK_PEER_ON,
2180 + VCHI_CALLBACK_PEER_RESUMED,
2181 + VCHI_CALLBACK_FORCED_POWER_OFF,
2183 +#ifdef USE_VCHIQ_ARM
2184 + // some extra notifications provided by vchiq_arm
2185 + VCHI_CALLBACK_SERVICE_OPENED,
2186 + VCHI_CALLBACK_BULK_RECEIVE_ABORTED,
2187 + VCHI_CALLBACK_BULK_TRANSMIT_ABORTED,
2190 + VCHI_CALLBACK_REASON_MAX
2191 +} VCHI_CALLBACK_REASON_T;
2193 +//Callback used by all services / bulk transfers
2194 +typedef void (*VCHI_CALLBACK_T)( void *callback_param, //my service local param
2195 + VCHI_CALLBACK_REASON_T reason,
2196 + void *handle ); //for transmitting msg's only
2201 + * Define vector struct for scatter-gather (vector) operations
2202 + * Vectors can be nested - if a vector element has negative length, then
2203 + * the data pointer is treated as pointing to another vector array, with
2204 + * '-vec_len' elements. Thus to append a header onto an existing vector,
2205 + * you can do this:
2207 + * void foo(const VCHI_MSG_VECTOR_T *v, int n)
2209 + * VCHI_MSG_VECTOR_T nv[2];
2210 + * nv[0].vec_base = my_header;
2211 + * nv[0].vec_len = sizeof my_header;
2212 + * nv[1].vec_base = v;
2213 + * nv[1].vec_len = -n;
2217 +typedef struct vchi_msg_vector {
2218 + const void *vec_base;
2220 +} VCHI_MSG_VECTOR_T;
2222 +// Opaque type for a connection API
2223 +typedef struct opaque_vchi_connection_api_t VCHI_CONNECTION_API_T;
2225 +// Opaque type for a message driver
2226 +typedef struct opaque_vchi_message_driver_t VCHI_MESSAGE_DRIVER_T;
2229 +// Iterator structure for reading ahead through received message queue. Allocated by client,
2230 +// initialised by vchi_msg_look_ahead. Fields are for internal VCHI use only.
2231 +// Iterates over messages in queue at the instant of the call to vchi_msg_lookahead -
2232 +// will not proceed to messages received since. Behaviour is undefined if an iterator
2233 +// is used again after messages for that service are removed/dequeued by any
2234 +// means other than vchi_msg_iter_... calls on the iterator itself.
2236 + struct opaque_vchi_service_t *service;
2243 +#endif // VCHI_COMMON_H_
2244 diff -urN linux-3.10/drivers/misc/vc04_services/interface/vchi/vchi.h linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchi/vchi.h
2245 --- linux-3.10/drivers/misc/vc04_services/interface/vchi/vchi.h 1970-01-01 01:00:00.000000000 +0100
2246 +++ linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchi/vchi.h 2013-07-06 15:25:50.000000000 +0100
2249 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
2251 + * Redistribution and use in source and binary forms, with or without
2252 + * modification, are permitted provided that the following conditions
2254 + * 1. Redistributions of source code must retain the above copyright
2255 + * notice, this list of conditions, and the following disclaimer,
2256 + * without modification.
2257 + * 2. Redistributions in binary form must reproduce the above copyright
2258 + * notice, this list of conditions and the following disclaimer in the
2259 + * documentation and/or other materials provided with the distribution.
2260 + * 3. The names of the above-listed copyright holders may not be used
2261 + * to endorse or promote products derived from this software without
2262 + * specific prior written permission.
2264 + * ALTERNATIVELY, this software may be distributed under the terms of the
2265 + * GNU General Public License ("GPL") version 2, as published by the Free
2266 + * Software Foundation.
2268 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
2269 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
2270 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
2271 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
2272 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
2273 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
2274 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
2275 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
2276 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
2277 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
2278 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
2284 +#include "interface/vchi/vchi_cfg.h"
2285 +#include "interface/vchi/vchi_common.h"
2286 +#include "interface/vchi/connections/connection.h"
2287 +#include "vchi_mh.h"
2290 +/******************************************************************************
2292 + *****************************************************************************/
2294 +#define VCHI_BULK_ROUND_UP(x) ((((unsigned long)(x))+VCHI_BULK_ALIGN-1) & ~(VCHI_BULK_ALIGN-1))
2295 +#define VCHI_BULK_ROUND_DOWN(x) (((unsigned long)(x)) & ~(VCHI_BULK_ALIGN-1))
2296 +#define VCHI_BULK_ALIGN_NBYTES(x) (VCHI_BULK_ALIGNED(x) ? 0 : (VCHI_BULK_ALIGN - ((unsigned long)(x) & (VCHI_BULK_ALIGN-1))))
2298 +#ifdef USE_VCHIQ_ARM
2299 +#define VCHI_BULK_ALIGNED(x) 1
2301 +#define VCHI_BULK_ALIGNED(x) (((unsigned long)(x) & (VCHI_BULK_ALIGN-1)) == 0)
2304 +struct vchi_version {
2306 + uint32_t version_min;
2308 +#define VCHI_VERSION(v_) { v_, v_ }
2309 +#define VCHI_VERSION_EX(v_, m_) { v_, m_ }
2316 +} VCHI_MSG_VECTOR_TYPE_T;
2318 +typedef struct vchi_msg_vector_ex {
2320 + VCHI_MSG_VECTOR_TYPE_T type;
2323 + // a memory handle
2326 + VCHI_MEM_HANDLE_T handle;
2331 + // an ordinary data pointer
2334 + const void *vec_base;
2338 + // a nested vector list
2341 + struct vchi_msg_vector_ex *vec;
2345 +} VCHI_MSG_VECTOR_EX_T;
2348 +// Construct an entry in a msg vector for a pointer (p) of length (l)
2349 +#define VCHI_VEC_POINTER(p,l) VCHI_VEC_POINTER, { { (VCHI_MEM_HANDLE_T)(p), (l) } }
2351 +// Construct an entry in a msg vector for a message handle (h), starting at offset (o) of length (l)
2352 +#define VCHI_VEC_HANDLE(h,o,l) VCHI_VEC_HANDLE, { { (h), (o), (l) } }
2354 +// Macros to manipulate 'FOURCC' values
2355 +#define MAKE_FOURCC(x) ((int32_t)( (x[0] << 24) | (x[1] << 16) | (x[2] << 8) | x[3] ))
2356 +#define FOURCC_TO_CHAR(x) (x >> 24) & 0xFF,(x >> 16) & 0xFF,(x >> 8) & 0xFF, x & 0xFF
2359 +// Opaque service information
2360 +struct opaque_vchi_service_t;
2362 +// Descriptor for a held message. Allocated by client, initialised by vchi_msg_hold,
2363 +// vchi_msg_iter_hold or vchi_msg_iter_hold_next. Fields are for internal VCHI use only.
2366 + struct opaque_vchi_service_t *service;
2372 +// structure used to provide the information needed to open a server or a client
2374 + struct vchi_version version;
2375 + int32_t service_id;
2376 + VCHI_CONNECTION_T *connection;
2377 + uint32_t rx_fifo_size;
2378 + uint32_t tx_fifo_size;
2379 + VCHI_CALLBACK_T callback;
2380 + void *callback_param;
2381 + /* client intends to receive bulk transfers of
2382 + odd lengths or into unaligned buffers */
2383 + int32_t want_unaligned_bulk_rx;
2384 + /* client intends to transmit bulk transfers of
2385 + odd lengths or out of unaligned buffers */
2386 + int32_t want_unaligned_bulk_tx;
2387 + /* client wants to check CRCs on (bulk) xfers.
2388 + Only needs to be set at 1 end - will do both directions. */
2390 +} SERVICE_CREATION_T;
2392 +// Opaque handle for a VCHI instance
2393 +typedef struct opaque_vchi_instance_handle_t *VCHI_INSTANCE_T;
2395 +// Opaque handle for a server or client
2396 +typedef struct opaque_vchi_service_handle_t *VCHI_SERVICE_HANDLE_T;
2398 +// Service registration & startup
2399 +typedef void (*VCHI_SERVICE_INIT)(VCHI_INSTANCE_T initialise_instance, VCHI_CONNECTION_T **connections, uint32_t num_connections);
2401 +typedef struct service_info_tag {
2402 + const char * const vll_filename; /* VLL to load to start this service. This is an empty string if VLL is "static" */
2403 + VCHI_SERVICE_INIT init; /* Service initialisation function */
2404 + void *vll_handle; /* VLL handle; NULL when unloaded or a "static VLL" in build */
2407 +/******************************************************************************
2408 + Global funcs - implementation is specific to which side you are on (local / remote)
2409 + *****************************************************************************/
2415 +extern /*@observer@*/ VCHI_CONNECTION_T * vchi_create_connection( const VCHI_CONNECTION_API_T * function_table,
2416 + const VCHI_MESSAGE_DRIVER_T * low_level);
2419 +// Routine used to initialise the vchi on both local + remote connections
2420 +extern int32_t vchi_initialise( VCHI_INSTANCE_T *instance_handle );
2422 +extern int32_t vchi_exit( void );
2424 +extern int32_t vchi_connect( VCHI_CONNECTION_T **connections,
2425 + const uint32_t num_connections,
2426 + VCHI_INSTANCE_T instance_handle );
2428 +//When this is called, ensure that all services have no data pending.
2429 +//Bulk transfers can remain 'queued'
2430 +extern int32_t vchi_disconnect( VCHI_INSTANCE_T instance_handle );
2432 +// Global control over bulk CRC checking
2433 +extern int32_t vchi_crc_control( VCHI_CONNECTION_T *connection,
2434 + VCHI_CRC_CONTROL_T control );
2436 +// helper functions
2437 +extern void * vchi_allocate_buffer(VCHI_SERVICE_HANDLE_T handle, uint32_t *length);
2438 +extern void vchi_free_buffer(VCHI_SERVICE_HANDLE_T handle, void *address);
2439 +extern uint32_t vchi_current_time(VCHI_INSTANCE_T instance_handle);
2442 +/******************************************************************************
2443 + Global service API
2444 + *****************************************************************************/
2445 +// Routine to create a named service
2446 +extern int32_t vchi_service_create( VCHI_INSTANCE_T instance_handle,
2447 + SERVICE_CREATION_T *setup,
2448 + VCHI_SERVICE_HANDLE_T *handle );
2450 +// Routine to destroy a service
2451 +extern int32_t vchi_service_destroy( const VCHI_SERVICE_HANDLE_T handle );
2453 +// Routine to open a named service
2454 +extern int32_t vchi_service_open( VCHI_INSTANCE_T instance_handle,
2455 + SERVICE_CREATION_T *setup,
2456 + VCHI_SERVICE_HANDLE_T *handle);
2458 +extern int32_t vchi_get_peer_version( const VCHI_SERVICE_HANDLE_T handle,
2459 + short *peer_version );
2461 +// Routine to close a named service
2462 +extern int32_t vchi_service_close( const VCHI_SERVICE_HANDLE_T handle );
2464 +// Routine to increment ref count on a named service
2465 +extern int32_t vchi_service_use( const VCHI_SERVICE_HANDLE_T handle );
2467 +// Routine to decrement ref count on a named service
2468 +extern int32_t vchi_service_release( const VCHI_SERVICE_HANDLE_T handle );
2470 +// Routine to send a message across a service
2471 +extern int32_t vchi_msg_queue( VCHI_SERVICE_HANDLE_T handle,
2473 + uint32_t data_size,
2474 + VCHI_FLAGS_T flags,
2475 + void *msg_handle );
2477 +// scatter-gather (vector) and send message
2478 +int32_t vchi_msg_queuev_ex( VCHI_SERVICE_HANDLE_T handle,
2479 + VCHI_MSG_VECTOR_EX_T *vector,
2481 + VCHI_FLAGS_T flags,
2482 + void *msg_handle );
2484 +// legacy scatter-gather (vector) and send message, only handles pointers
2485 +int32_t vchi_msg_queuev( VCHI_SERVICE_HANDLE_T handle,
2486 + VCHI_MSG_VECTOR_T *vector,
2488 + VCHI_FLAGS_T flags,
2489 + void *msg_handle );
2491 +// Routine to receive a msg from a service
2492 +// Dequeue is equivalent to hold, copy into client buffer, release
2493 +extern int32_t vchi_msg_dequeue( VCHI_SERVICE_HANDLE_T handle,
2495 + uint32_t max_data_size_to_read,
2496 + uint32_t *actual_msg_size,
2497 + VCHI_FLAGS_T flags );
2499 +// Routine to look at a message in place.
2500 +// The message is not dequeued, so a subsequent call to peek or dequeue
2501 +// will return the same message.
2502 +extern int32_t vchi_msg_peek( VCHI_SERVICE_HANDLE_T handle,
2504 + uint32_t *msg_size,
2505 + VCHI_FLAGS_T flags );
2507 +// Routine to remove a message after it has been read in place with peek
2508 +// The first message on the queue is dequeued.
2509 +extern int32_t vchi_msg_remove( VCHI_SERVICE_HANDLE_T handle );
2511 +// Routine to look at a message in place.
2512 +// The message is dequeued, so the caller is left holding it; the descriptor is
2513 +// filled in and must be released when the user has finished with the message.
2514 +extern int32_t vchi_msg_hold( VCHI_SERVICE_HANDLE_T handle,
2515 + void **data, // } may be NULL, as info can be
2516 + uint32_t *msg_size, // } obtained from HELD_MSG_T
2517 + VCHI_FLAGS_T flags,
2518 + VCHI_HELD_MSG_T *message_descriptor );
2520 +// Initialise an iterator to look through messages in place
2521 +extern int32_t vchi_msg_look_ahead( VCHI_SERVICE_HANDLE_T handle,
2522 + VCHI_MSG_ITER_T *iter,
2523 + VCHI_FLAGS_T flags );
2525 +/******************************************************************************
2526 + Global service support API - operations on held messages and message iterators
2527 + *****************************************************************************/
2529 +// Routine to get the address of a held message
2530 +extern void *vchi_held_msg_ptr( const VCHI_HELD_MSG_T *message );
2532 +// Routine to get the size of a held message
2533 +extern int32_t vchi_held_msg_size( const VCHI_HELD_MSG_T *message );
2535 +// Routine to get the transmit timestamp as written into the header by the peer
2536 +extern uint32_t vchi_held_msg_tx_timestamp( const VCHI_HELD_MSG_T *message );
2538 +// Routine to get the reception timestamp, written as we parsed the header
2539 +extern uint32_t vchi_held_msg_rx_timestamp( const VCHI_HELD_MSG_T *message );
2541 +// Routine to release a held message after it has been processed
2542 +extern int32_t vchi_held_msg_release( VCHI_HELD_MSG_T *message );
2544 +// Indicates whether the iterator has a next message.
2545 +extern int32_t vchi_msg_iter_has_next( const VCHI_MSG_ITER_T *iter );
2547 +// Return the pointer and length for the next message and advance the iterator.
2548 +extern int32_t vchi_msg_iter_next( VCHI_MSG_ITER_T *iter,
2550 + uint32_t *msg_size );
2552 +// Remove the last message returned by vchi_msg_iter_next.
2553 +// Can only be called once after each call to vchi_msg_iter_next.
2554 +extern int32_t vchi_msg_iter_remove( VCHI_MSG_ITER_T *iter );
2556 +// Hold the last message returned by vchi_msg_iter_next.
2557 +// Can only be called once after each call to vchi_msg_iter_next.
2558 +extern int32_t vchi_msg_iter_hold( VCHI_MSG_ITER_T *iter,
2559 + VCHI_HELD_MSG_T *message );
2561 +// Return information for the next message, and hold it, advancing the iterator.
2562 +extern int32_t vchi_msg_iter_hold_next( VCHI_MSG_ITER_T *iter,
2563 + void **data, // } may be NULL
2564 + uint32_t *msg_size, // }
2565 + VCHI_HELD_MSG_T *message );
2568 +/******************************************************************************
2570 + *****************************************************************************/
2572 +// Routine to prepare interface for a transfer from the other side
2573 +extern int32_t vchi_bulk_queue_receive( VCHI_SERVICE_HANDLE_T handle,
2575 + uint32_t data_size,
2576 + VCHI_FLAGS_T flags,
2577 + void *transfer_handle );
2580 +// Prepare interface for a transfer from the other side into relocatable memory.
2581 +int32_t vchi_bulk_queue_receive_reloc( const VCHI_SERVICE_HANDLE_T handle,
2582 + VCHI_MEM_HANDLE_T h_dst,
2584 + uint32_t data_size,
2585 + const VCHI_FLAGS_T flags,
2586 + void * const bulk_handle );
2588 +// Routine to queue up data ready for transfer to the other side (once they have signalled they are ready)
2589 +extern int32_t vchi_bulk_queue_transmit( VCHI_SERVICE_HANDLE_T handle,
2590 + const void *data_src,
2591 + uint32_t data_size,
2592 + VCHI_FLAGS_T flags,
2593 + void *transfer_handle );
2596 +/******************************************************************************
2597 + Configuration plumbing
2598 + *****************************************************************************/
2600 +// function prototypes for the different mid layers (the state info gives the different physical connections)
2601 +extern const VCHI_CONNECTION_API_T *single_get_func_table( void );
2602 +//extern const VCHI_CONNECTION_API_T *local_server_get_func_table( void );
2603 +//extern const VCHI_CONNECTION_API_T *local_client_get_func_table( void );
2605 +// declare all message drivers here
2606 +const VCHI_MESSAGE_DRIVER_T *vchi_mphi_message_driver_func_table( void );
2612 +extern int32_t vchi_bulk_queue_transmit_reloc( VCHI_SERVICE_HANDLE_T handle,
2613 + VCHI_MEM_HANDLE_T h_src,
2615 + uint32_t data_size,
2616 + VCHI_FLAGS_T flags,
2617 + void *transfer_handle );
2618 +#endif /* VCHI_H_ */
2620 +/****************************** End of file **********************************/
2621 diff -urN linux-3.10/drivers/misc/vc04_services/interface/vchi/vchi_mh.h linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchi/vchi_mh.h
2622 --- linux-3.10/drivers/misc/vc04_services/interface/vchi/vchi_mh.h 1970-01-01 01:00:00.000000000 +0100
2623 +++ linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchi/vchi_mh.h 2013-07-06 15:25:50.000000000 +0100
2626 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
2628 + * Redistribution and use in source and binary forms, with or without
2629 + * modification, are permitted provided that the following conditions
2631 + * 1. Redistributions of source code must retain the above copyright
2632 + * notice, this list of conditions, and the following disclaimer,
2633 + * without modification.
2634 + * 2. Redistributions in binary form must reproduce the above copyright
2635 + * notice, this list of conditions and the following disclaimer in the
2636 + * documentation and/or other materials provided with the distribution.
2637 + * 3. The names of the above-listed copyright holders may not be used
2638 + * to endorse or promote products derived from this software without
2639 + * specific prior written permission.
2641 + * ALTERNATIVELY, this software may be distributed under the terms of the
2642 + * GNU General Public License ("GPL") version 2, as published by the Free
2643 + * Software Foundation.
2645 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
2646 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
2647 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
2648 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
2649 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
2650 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
2651 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
2652 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
2653 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
2654 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
2655 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
2661 +#include <linux/types.h>
2663 +typedef int32_t VCHI_MEM_HANDLE_T;
2664 +#define VCHI_MEM_HANDLE_INVALID 0
2667 diff -urN linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
2668 --- linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c 1970-01-01 01:00:00.000000000 +0100
2669 +++ linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c 2013-07-06 15:25:50.000000000 +0100
2672 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
2674 + * Redistribution and use in source and binary forms, with or without
2675 + * modification, are permitted provided that the following conditions
2677 + * 1. Redistributions of source code must retain the above copyright
2678 + * notice, this list of conditions, and the following disclaimer,
2679 + * without modification.
2680 + * 2. Redistributions in binary form must reproduce the above copyright
2681 + * notice, this list of conditions and the following disclaimer in the
2682 + * documentation and/or other materials provided with the distribution.
2683 + * 3. The names of the above-listed copyright holders may not be used
2684 + * to endorse or promote products derived from this software without
2685 + * specific prior written permission.
2687 + * ALTERNATIVELY, this software may be distributed under the terms of the
2688 + * GNU General Public License ("GPL") version 2, as published by the Free
2689 + * Software Foundation.
2691 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
2692 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
2693 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
2694 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
2695 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
2696 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
2697 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
2698 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
2699 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
2700 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
2701 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
2704 +#include <linux/kernel.h>
2705 +#include <linux/types.h>
2706 +#include <linux/errno.h>
2707 +#include <linux/interrupt.h>
2708 +#include <linux/irq.h>
2709 +#include <linux/pagemap.h>
2710 +#include <linux/dma-mapping.h>
2711 +#include <linux/version.h>
2712 +#include <linux/io.h>
2713 +#include <linux/uaccess.h>
2714 +#include <asm/pgtable.h>
2716 +#include <mach/irqs.h>
2718 +#include <mach/platform.h>
2719 +#include <mach/vcio.h>
2721 +#define TOTAL_SLOTS (VCHIQ_SLOT_ZERO_SLOTS + 2 * 32)
2723 +#define VCHIQ_DOORBELL_IRQ IRQ_ARM_DOORBELL_0
2724 +#define VCHIQ_ARM_ADDRESS(x) ((void *)__virt_to_bus((unsigned)x))
2726 +#include "vchiq_arm.h"
2727 +#include "vchiq_2835.h"
2728 +#include "vchiq_connected.h"
2730 +#define MAX_FRAGMENTS (VCHIQ_NUM_CURRENT_BULKS * 2)
2732 +typedef struct vchiq_2835_state_struct {
2734 + VCHIQ_ARM_STATE_T arm_state;
2735 +} VCHIQ_2835_ARM_STATE_T;
2737 +static char *g_slot_mem;
2738 +static int g_slot_mem_size;
2739 +dma_addr_t g_slot_phys;
2740 +static FRAGMENTS_T *g_fragments_base;
2741 +static FRAGMENTS_T *g_free_fragments;
2742 +struct semaphore g_free_fragments_sema;
2744 +extern int vchiq_arm_log_level;
2746 +static DEFINE_SEMAPHORE(g_free_fragments_mutex);
2749 +vchiq_doorbell_irq(int irq, void *dev_id);
2752 +create_pagelist(char __user *buf, size_t count, unsigned short type,
2753 + struct task_struct *task, PAGELIST_T ** ppagelist);
2756 +free_pagelist(PAGELIST_T *pagelist, int actual);
2759 +vchiq_platform_init(VCHIQ_STATE_T *state)
2761 + VCHIQ_SLOT_ZERO_T *vchiq_slot_zero;
2762 + int frag_mem_size;
2766 + /* Allocate space for the channels in coherent memory */
2767 + g_slot_mem_size = PAGE_ALIGN(TOTAL_SLOTS * VCHIQ_SLOT_SIZE);
2768 + frag_mem_size = PAGE_ALIGN(sizeof(FRAGMENTS_T) * MAX_FRAGMENTS);
2770 + g_slot_mem = dma_alloc_coherent(NULL, g_slot_mem_size + frag_mem_size,
2771 + &g_slot_phys, GFP_ATOMIC);
2773 + if (!g_slot_mem) {
2774 + vchiq_log_error(vchiq_arm_log_level,
2775 + "Unable to allocate channel memory");
2777 + goto failed_alloc;
2780 + WARN_ON(((int)g_slot_mem & (PAGE_SIZE - 1)) != 0);
2782 + vchiq_slot_zero = vchiq_init_slots(g_slot_mem, g_slot_mem_size);
2783 + if (!vchiq_slot_zero) {
2785 + goto failed_init_slots;
2788 + vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX] =
2789 + (int)g_slot_phys + g_slot_mem_size;
2790 + vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX] =
2793 + g_fragments_base = (FRAGMENTS_T *)(g_slot_mem + g_slot_mem_size);
2794 + g_slot_mem_size += frag_mem_size;
2796 + g_free_fragments = g_fragments_base;
2797 + for (i = 0; i < (MAX_FRAGMENTS - 1); i++) {
2798 + *(FRAGMENTS_T **)&g_fragments_base[i] =
2799 + &g_fragments_base[i + 1];
2801 + *(FRAGMENTS_T **)&g_fragments_base[i] = NULL;
2802 + sema_init(&g_free_fragments_sema, MAX_FRAGMENTS);
2804 + if (vchiq_init_state(state, vchiq_slot_zero, 0/*slave*/) !=
2807 + goto failed_vchiq_init;
2810 + err = request_irq(VCHIQ_DOORBELL_IRQ, vchiq_doorbell_irq,
2811 + IRQF_IRQPOLL, "VCHIQ doorbell",
2814 + vchiq_log_error(vchiq_arm_log_level, "%s: failed to register "
2815 + "irq=%d err=%d", __func__,
2816 + VCHIQ_DOORBELL_IRQ, err);
2817 + goto failed_request_irq;
2820 + /* Send the base address of the slots to VideoCore */
2822 + dsb(); /* Ensure all writes have completed */
2824 + bcm_mailbox_write(MBOX_CHAN_VCHIQ, (unsigned int)g_slot_phys);
2826 + vchiq_log_info(vchiq_arm_log_level,
2827 + "vchiq_init - done (slots %x, phys %x)",
2828 + (unsigned int)vchiq_slot_zero, g_slot_phys);
2830 + vchiq_call_connected_callbacks();
2834 +failed_request_irq:
2837 + dma_free_coherent(NULL, g_slot_mem_size, g_slot_mem, g_slot_phys);
2844 +vchiq_platform_exit(VCHIQ_STATE_T *state)
2846 + free_irq(VCHIQ_DOORBELL_IRQ, state);
2847 + dma_free_coherent(NULL, g_slot_mem_size,
2848 + g_slot_mem, g_slot_phys);
2853 +vchiq_platform_init_state(VCHIQ_STATE_T *state)
2855 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
2856 + state->platform_state = kzalloc(sizeof(VCHIQ_2835_ARM_STATE_T), GFP_KERNEL);
2857 + ((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->inited = 1;
2858 + status = vchiq_arm_init_state(state, &((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->arm_state);
2859 + if(status != VCHIQ_SUCCESS)
2861 + ((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->inited = 0;
2867 +vchiq_platform_get_arm_state(VCHIQ_STATE_T *state)
2869 + if(!((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->inited)
2873 + return &((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->arm_state;
2877 +remote_event_signal(REMOTE_EVENT_T *event)
2883 + dsb(); /* data barrier operation */
2885 + if (event->armed) {
2886 + /* trigger vc interrupt */
2888 + writel(0, __io_address(ARM_0_BELL2));
2893 +vchiq_copy_from_user(void *dst, const void *src, int size)
2895 + if ((uint32_t)src < TASK_SIZE) {
2896 + return copy_from_user(dst, src, size);
2898 + memcpy(dst, src, size);
2904 +vchiq_prepare_bulk_data(VCHIQ_BULK_T *bulk, VCHI_MEM_HANDLE_T memhandle,
2905 + void *offset, int size, int dir)
2907 + PAGELIST_T *pagelist;
2910 + WARN_ON(memhandle != VCHI_MEM_HANDLE_INVALID);
2912 + ret = create_pagelist((char __user *)offset, size,
2913 + (dir == VCHIQ_BULK_RECEIVE)
2919 + return VCHIQ_ERROR;
2921 + bulk->handle = memhandle;
2922 + bulk->data = VCHIQ_ARM_ADDRESS(pagelist);
2924 + /* Store the pagelist address in remote_data, which isn't used by the
2926 + bulk->remote_data = pagelist;
2928 + return VCHIQ_SUCCESS;
2932 +vchiq_complete_bulk(VCHIQ_BULK_T *bulk)
2934 + if (bulk && bulk->remote_data && bulk->actual)
2935 + free_pagelist((PAGELIST_T *)bulk->remote_data, bulk->actual);
2939 +vchiq_transfer_bulk(VCHIQ_BULK_T *bulk)
2942 + * This should only be called on the master (VideoCore) side, but
2943 + * provide an implementation to avoid the need for ifdefery.
2949 +vchiq_dump_platform_state(void *dump_context)
2953 + len = snprintf(buf, sizeof(buf),
2954 + " Platform: 2835 (VC master)");
2955 + vchiq_dump(dump_context, buf, len + 1);
2959 +vchiq_platform_suspend(VCHIQ_STATE_T *state)
2961 + return VCHIQ_ERROR;
2965 +vchiq_platform_resume(VCHIQ_STATE_T *state)
2967 + return VCHIQ_SUCCESS;
2971 +vchiq_platform_paused(VCHIQ_STATE_T *state)
2976 +vchiq_platform_resumed(VCHIQ_STATE_T *state)
2981 +vchiq_platform_videocore_wanted(VCHIQ_STATE_T* state)
2983 + return 1; // autosuspend not supported - videocore always wanted
2987 +vchiq_platform_use_suspend_timer(void)
2992 +vchiq_dump_platform_use_state(VCHIQ_STATE_T *state)
2994 + vchiq_log_info((vchiq_arm_log_level>=VCHIQ_LOG_INFO),"Suspend timer not in use");
2997 +vchiq_platform_handle_timeout(VCHIQ_STATE_T *state)
3006 +vchiq_doorbell_irq(int irq, void *dev_id)
3008 + VCHIQ_STATE_T *state = dev_id;
3009 + irqreturn_t ret = IRQ_NONE;
3010 + unsigned int status;
3012 + /* Read (and clear) the doorbell */
3013 + status = readl(__io_address(ARM_0_BELL0));
3015 + if (status & 0x4) { /* Was the doorbell rung? */
3016 + remote_event_pollall(state);
3017 + ret = IRQ_HANDLED;
3023 +/* There is a potential problem with partial cache lines (pages?)
3024 +** at the ends of the block when reading. If the CPU accessed anything in
3025 +** the same line (page?) then it may have pulled old data into the cache,
3026 +** obscuring the new data underneath. We can solve this by transferring the
3027 +** partial cache lines separately, and allowing the ARM to copy into the
3030 +** N.B. This implementation plays slightly fast and loose with the Linux
3031 +** driver programming rules, e.g. its use of __virt_to_bus instead of
3032 +** dma_map_single, but it isn't a multi-platform driver and it benefits
3033 +** from increased speed as a result.
3037 +create_pagelist(char __user *buf, size_t count, unsigned short type,
3038 + struct task_struct *task, PAGELIST_T ** ppagelist)
3040 + PAGELIST_T *pagelist;
3041 + struct page **pages;
3042 + struct page *page;
3043 + unsigned long *addrs;
3044 + unsigned int num_pages, offset, i;
3045 + char *addr, *base_addr, *next_addr;
3046 + int run, addridx, actual_pages;
3048 + offset = (unsigned int)buf & (PAGE_SIZE - 1);
3049 + num_pages = (count + offset + PAGE_SIZE - 1) / PAGE_SIZE;
3051 + *ppagelist = NULL;
3053 + /* Allocate enough storage to hold the page pointers and the page
3056 + pagelist = kmalloc(sizeof(PAGELIST_T) +
3057 + (num_pages * sizeof(unsigned long)) +
3058 + (num_pages * sizeof(pages[0])),
3061 + vchiq_log_trace(vchiq_arm_log_level,
3062 + "create_pagelist - %x", (unsigned int)pagelist);
3066 + addrs = pagelist->addrs;
3067 + pages = (struct page **)(addrs + num_pages);
3069 + down_read(&task->mm->mmap_sem);
3070 + actual_pages = get_user_pages(task, task->mm,
3071 + (unsigned long)buf & ~(PAGE_SIZE - 1), num_pages,
3072 + (type == PAGELIST_READ) /*Write */ , 0 /*Force */ ,
3073 + pages, NULL /*vmas */);
3074 + up_read(&task->mm->mmap_sem);
3076 + if (actual_pages != num_pages)
3078 + /* This is probably due to the process being killed */
3079 + while (actual_pages > 0)
3082 + page_cache_release(pages[actual_pages]);
3085 + if (actual_pages == 0)
3086 + actual_pages = -ENOMEM;
3087 + return actual_pages;
3090 + pagelist->length = count;
3091 + pagelist->type = type;
3092 + pagelist->offset = offset;
3094 + /* Group the pages into runs of contiguous pages */
3096 + base_addr = VCHIQ_ARM_ADDRESS(page_address(pages[0]));
3097 + next_addr = base_addr + PAGE_SIZE;
3101 + for (i = 1; i < num_pages; i++) {
3102 + addr = VCHIQ_ARM_ADDRESS(page_address(pages[i]));
3103 + if ((addr == next_addr) && (run < (PAGE_SIZE - 1))) {
3104 + next_addr += PAGE_SIZE;
3107 + addrs[addridx] = (unsigned long)base_addr + run;
3110 + next_addr = addr + PAGE_SIZE;
3115 + addrs[addridx] = (unsigned long)base_addr + run;
3118 + /* Partial cache lines (fragments) require special measures */
3119 + if ((type == PAGELIST_READ) &&
3120 + ((pagelist->offset & (CACHE_LINE_SIZE - 1)) ||
3121 + ((pagelist->offset + pagelist->length) &
3122 + (CACHE_LINE_SIZE - 1)))) {
3123 + FRAGMENTS_T *fragments;
3125 + if (down_interruptible(&g_free_fragments_sema) != 0) {
3130 + WARN_ON(g_free_fragments == NULL);
3132 + down(&g_free_fragments_mutex);
3133 + fragments = (FRAGMENTS_T *) g_free_fragments;
3134 + WARN_ON(fragments == NULL);
3135 + g_free_fragments = *(FRAGMENTS_T **) g_free_fragments;
3136 + up(&g_free_fragments_mutex);
3138 + PAGELIST_READ_WITH_FRAGMENTS + (fragments -
3139 + g_fragments_base);
3142 + for (page = virt_to_page(pagelist);
3143 + page <= virt_to_page(addrs + num_pages - 1); page++) {
3144 + flush_dcache_page(page);
3147 + *ppagelist = pagelist;
3153 +free_pagelist(PAGELIST_T *pagelist, int actual)
3155 + struct page **pages;
3156 + unsigned int num_pages, i;
3158 + vchiq_log_trace(vchiq_arm_log_level,
3159 + "free_pagelist - %x, %d", (unsigned int)pagelist, actual);
3162 + (pagelist->length + pagelist->offset + PAGE_SIZE - 1) /
3165 + pages = (struct page **)(pagelist->addrs + num_pages);
3167 + /* Deal with any partial cache lines (fragments) */
3168 + if (pagelist->type >= PAGELIST_READ_WITH_FRAGMENTS) {
3169 + FRAGMENTS_T *fragments = g_fragments_base +
3170 + (pagelist->type - PAGELIST_READ_WITH_FRAGMENTS);
3171 + int head_bytes, tail_bytes;
3172 + head_bytes = (CACHE_LINE_SIZE - pagelist->offset) &
3173 + (CACHE_LINE_SIZE - 1);
3174 + tail_bytes = (pagelist->offset + actual) &
3175 + (CACHE_LINE_SIZE - 1);
3177 + if ((actual >= 0) && (head_bytes != 0)) {
3178 + if (head_bytes > actual)
3179 + head_bytes = actual;
3181 + memcpy((char *)page_address(pages[0]) +
3183 + fragments->headbuf,
3186 + if ((actual >= 0) && (head_bytes < actual) &&
3187 + (tail_bytes != 0)) {
3188 + memcpy((char *)page_address(pages[num_pages - 1]) +
3189 + ((pagelist->offset + actual) &
3190 + (PAGE_SIZE - 1) & ~(CACHE_LINE_SIZE - 1)),
3191 + fragments->tailbuf, tail_bytes);
3194 + down(&g_free_fragments_mutex);
3195 + *(FRAGMENTS_T **) fragments = g_free_fragments;
3196 + g_free_fragments = fragments;
3197 + up(&g_free_fragments_mutex);
3198 + up(&g_free_fragments_sema);
3201 + for (i = 0; i < num_pages; i++) {
3202 + if (pagelist->type != PAGELIST_WRITE)
3203 + set_page_dirty(pages[i]);
3204 + page_cache_release(pages[i]);
3209 diff -urN linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835.h linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835.h
3210 --- linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835.h 1970-01-01 01:00:00.000000000 +0100
3211 +++ linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835.h 2013-07-06 15:25:50.000000000 +0100
3214 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
3216 + * Redistribution and use in source and binary forms, with or without
3217 + * modification, are permitted provided that the following conditions
3219 + * 1. Redistributions of source code must retain the above copyright
3220 + * notice, this list of conditions, and the following disclaimer,
3221 + * without modification.
3222 + * 2. Redistributions in binary form must reproduce the above copyright
3223 + * notice, this list of conditions and the following disclaimer in the
3224 + * documentation and/or other materials provided with the distribution.
3225 + * 3. The names of the above-listed copyright holders may not be used
3226 + * to endorse or promote products derived from this software without
3227 + * specific prior written permission.
3229 + * ALTERNATIVELY, this software may be distributed under the terms of the
3230 + * GNU General Public License ("GPL") version 2, as published by the Free
3231 + * Software Foundation.
3233 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
3234 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
3235 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
3236 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
3237 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
3238 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
3239 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
3240 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
3241 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
3242 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
3243 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
3246 +#ifndef VCHIQ_2835_H
3247 +#define VCHIQ_2835_H
3249 +#include "vchiq_pagelist.h"
3251 +#define VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX 0
3252 +#define VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX 1
3254 +#endif /* VCHIQ_2835_H */
3255 diff -urN linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.c linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.c
3256 --- linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.c 1970-01-01 01:00:00.000000000 +0100
3257 +++ linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.c 2013-07-06 15:25:50.000000000 +0100
3260 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
3262 + * Redistribution and use in source and binary forms, with or without
3263 + * modification, are permitted provided that the following conditions
3265 + * 1. Redistributions of source code must retain the above copyright
3266 + * notice, this list of conditions, and the following disclaimer,
3267 + * without modification.
3268 + * 2. Redistributions in binary form must reproduce the above copyright
3269 + * notice, this list of conditions and the following disclaimer in the
3270 + * documentation and/or other materials provided with the distribution.
3271 + * 3. The names of the above-listed copyright holders may not be used
3272 + * to endorse or promote products derived from this software without
3273 + * specific prior written permission.
3275 + * ALTERNATIVELY, this software may be distributed under the terms of the
3276 + * GNU General Public License ("GPL") version 2, as published by the Free
3277 + * Software Foundation.
3279 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
3280 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
3281 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
3282 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
3283 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
3284 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
3285 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
3286 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
3287 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
3288 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
3289 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
3292 +#include <linux/kernel.h>
3293 +#include <linux/module.h>
3294 +#include <linux/types.h>
3295 +#include <linux/errno.h>
3296 +#include <linux/cdev.h>
3297 +#include <linux/fs.h>
3298 +#include <linux/device.h>
3299 +#include <linux/mm.h>
3300 +#include <linux/highmem.h>
3301 +#include <linux/pagemap.h>
3302 +#include <linux/bug.h>
3303 +#include <linux/semaphore.h>
3304 +#include <linux/list.h>
3305 +#include <linux/proc_fs.h>
3307 +#include "vchiq_core.h"
3308 +#include "vchiq_ioctl.h"
3309 +#include "vchiq_arm.h"
3311 +#define DEVICE_NAME "vchiq"
3313 +/* Override the default prefix, which would be vchiq_arm (from the filename) */
3314 +#undef MODULE_PARAM_PREFIX
3315 +#define MODULE_PARAM_PREFIX DEVICE_NAME "."
3317 +#define VCHIQ_MINOR 0
3319 +/* Some per-instance constants */
3320 +#define MAX_COMPLETIONS 16
3321 +#define MAX_SERVICES 64
3322 +#define MAX_ELEMENTS 8
3323 +#define MSG_QUEUE_SIZE 64
3325 +#define KEEPALIVE_VER 1
3326 +#define KEEPALIVE_VER_MIN KEEPALIVE_VER
3328 +/* Run time control of log level, based on KERN_XXX level. */
3329 +int vchiq_arm_log_level = VCHIQ_LOG_DEFAULT;
3330 +int vchiq_susp_log_level = VCHIQ_LOG_ERROR;
3332 +#define SUSPEND_TIMER_TIMEOUT_MS 100
3333 +#define SUSPEND_RETRY_TIMER_TIMEOUT_MS 1000
3335 +#define VC_SUSPEND_NUM_OFFSET 3 /* number of values before idle which are -ve */
3336 +static const char *const suspend_state_names[] = {
3337 + "VC_SUSPEND_FORCE_CANCELED",
3338 + "VC_SUSPEND_REJECTED",
3339 + "VC_SUSPEND_FAILED",
3340 + "VC_SUSPEND_IDLE",
3341 + "VC_SUSPEND_REQUESTED",
3342 + "VC_SUSPEND_IN_PROGRESS",
3343 + "VC_SUSPEND_SUSPENDED"
3345 +#define VC_RESUME_NUM_OFFSET 1 /* number of values before idle which are -ve */
3346 +static const char *const resume_state_names[] = {
3347 + "VC_RESUME_FAILED",
3349 + "VC_RESUME_REQUESTED",
3350 + "VC_RESUME_IN_PROGRESS",
3351 + "VC_RESUME_RESUMED"
3353 +/* The number of times we allow force suspend to timeout before actually
3354 +** _forcing_ suspend. This is to cater for SW which fails to release vchiq
3355 +** correctly - we don't want to prevent ARM suspend indefinitely in this case.
3357 +#define FORCE_SUSPEND_FAIL_MAX 8
3359 +/* The time in ms allowed for videocore to go idle when force suspend has been
3361 +#define FORCE_SUSPEND_TIMEOUT_MS 200
3364 +static void suspend_timer_callback(unsigned long context);
3365 +static int vchiq_proc_add_instance(VCHIQ_INSTANCE_T instance);
3366 +static void vchiq_proc_remove_instance(VCHIQ_INSTANCE_T instance);
3369 +typedef struct user_service_struct {
3370 + VCHIQ_SERVICE_T *service;
3372 + VCHIQ_INSTANCE_T instance;
3374 + int dequeue_pending;
3375 + int message_available_pos;
3378 + struct semaphore insert_event;
3379 + struct semaphore remove_event;
3380 + VCHIQ_HEADER_T * msg_queue[MSG_QUEUE_SIZE];
3383 +struct bulk_waiter_node {
3384 + struct bulk_waiter bulk_waiter;
3386 + struct list_head list;
3389 +struct vchiq_instance_struct {
3390 + VCHIQ_STATE_T *state;
3391 + VCHIQ_COMPLETION_DATA_T completions[MAX_COMPLETIONS];
3392 + int completion_insert;
3393 + int completion_remove;
3394 + struct semaphore insert_event;
3395 + struct semaphore remove_event;
3396 + struct mutex completion_mutex;
3403 + struct list_head bulk_waiter_list;
3404 + struct mutex bulk_waiter_list_mutex;
3406 + struct proc_dir_entry *proc_entry;
3409 +typedef struct dump_context_struct {
3416 +static struct cdev vchiq_cdev;
3417 +static dev_t vchiq_devid;
3418 +static VCHIQ_STATE_T g_state;
3419 +static struct class *vchiq_class;
3420 +static struct device *vchiq_dev;
3421 +static DEFINE_SPINLOCK(msg_queue_spinlock);
3423 +static const char *const ioctl_names[] = {
3429 + "QUEUE_BULK_TRANSMIT",
3430 + "QUEUE_BULK_RECEIVE",
3431 + "AWAIT_COMPLETION",
3432 + "DEQUEUE_MESSAGE",
3437 + "RELEASE_SERVICE",
3438 + "SET_SERVICE_OPTION",
3442 +vchiq_static_assert((sizeof(ioctl_names)/sizeof(ioctl_names[0])) ==
3443 + (VCHIQ_IOC_MAX + 1));
3446 +dump_phys_mem(void *virt_addr, uint32_t num_bytes);
3448 +/****************************************************************************
3452 +***************************************************************************/
3454 +static VCHIQ_STATUS_T
3455 +add_completion(VCHIQ_INSTANCE_T instance, VCHIQ_REASON_T reason,
3456 + VCHIQ_HEADER_T *header, USER_SERVICE_T *user_service,
3457 + void *bulk_userdata)
3459 + VCHIQ_COMPLETION_DATA_T *completion;
3460 + DEBUG_INITIALISE(g_state.local)
3462 + while (instance->completion_insert ==
3463 + (instance->completion_remove + MAX_COMPLETIONS)) {
3464 + /* Out of space - wait for the client */
3465 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
3466 + vchiq_log_trace(vchiq_arm_log_level,
3467 + "add_completion - completion queue full");
3468 + DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT);
3469 + if (down_interruptible(&instance->remove_event) != 0) {
3470 + vchiq_log_info(vchiq_arm_log_level,
3471 + "service_callback interrupted");
3472 + return VCHIQ_RETRY;
3473 + } else if (instance->closing) {
3474 + vchiq_log_info(vchiq_arm_log_level,
3475 + "service_callback closing");
3476 + return VCHIQ_ERROR;
3478 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
3482 + &instance->completions[instance->completion_insert &
3483 + (MAX_COMPLETIONS - 1)];
3485 + completion->header = header;
3486 + completion->reason = reason;
3487 + /* N.B. service_userdata is updated while processing AWAIT_COMPLETION */
3488 + completion->service_userdata = user_service->service;
3489 + completion->bulk_userdata = bulk_userdata;
3491 + if (reason == VCHIQ_SERVICE_CLOSED)
3492 + /* Take an extra reference, to be held until
3493 + this CLOSED notification is delivered. */
3494 + lock_service(user_service->service);
3496 + /* A write barrier is needed here to ensure that the entire completion
3497 + record is written out before the insert point. */
3500 + if (reason == VCHIQ_MESSAGE_AVAILABLE)
3501 + user_service->message_available_pos =
3502 + instance->completion_insert;
3503 + instance->completion_insert++;
3505 + up(&instance->insert_event);
3507 + return VCHIQ_SUCCESS;
3510 +/****************************************************************************
3514 +***************************************************************************/
3516 +static VCHIQ_STATUS_T
3517 +service_callback(VCHIQ_REASON_T reason, VCHIQ_HEADER_T *header,
3518 + VCHIQ_SERVICE_HANDLE_T handle, void *bulk_userdata)
3520 + /* How do we ensure the callback goes to the right client?
3521 + ** The service_user data points to a USER_SERVICE_T record containing
3522 + ** the original callback and the user state structure, which contains a
3523 + ** circular buffer for completion records.
3525 + USER_SERVICE_T *user_service;
3526 + VCHIQ_SERVICE_T *service;
3527 + VCHIQ_INSTANCE_T instance;
3528 + DEBUG_INITIALISE(g_state.local)
3530 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
3532 + service = handle_to_service(handle);
3534 + user_service = (USER_SERVICE_T *)service->base.userdata;
3535 + instance = user_service->instance;
3537 + if (!instance || instance->closing)
3538 + return VCHIQ_SUCCESS;
3540 + vchiq_log_trace(vchiq_arm_log_level,
3541 + "service_callback - service %lx(%d), reason %d, header %lx, "
3542 + "instance %lx, bulk_userdata %lx",
3543 + (unsigned long)user_service,
3544 + service->localport,
3545 + reason, (unsigned long)header,
3546 + (unsigned long)instance, (unsigned long)bulk_userdata);
3548 + if (header && user_service->is_vchi) {
3549 + spin_lock(&msg_queue_spinlock);
3550 + while (user_service->msg_insert ==
3551 + (user_service->msg_remove + MSG_QUEUE_SIZE)) {
3552 + spin_unlock(&msg_queue_spinlock);
3553 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
3554 + DEBUG_COUNT(MSG_QUEUE_FULL_COUNT);
3555 + vchiq_log_trace(vchiq_arm_log_level,
3556 + "service_callback - msg queue full");
3557 + /* If there is no MESSAGE_AVAILABLE in the completion
3560 + if ((user_service->message_available_pos -
3561 + instance->completion_remove) < 0) {
3562 + VCHIQ_STATUS_T status;
3563 + vchiq_log_info(vchiq_arm_log_level,
3564 + "Inserting extra MESSAGE_AVAILABLE");
3565 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
3566 + status = add_completion(instance, reason,
3567 + NULL, user_service, bulk_userdata);
3568 + if (status != VCHIQ_SUCCESS) {
3569 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
3574 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
3575 + if (down_interruptible(&user_service->remove_event)
3577 + vchiq_log_info(vchiq_arm_log_level,
3578 + "service_callback interrupted");
3579 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
3580 + return VCHIQ_RETRY;
3581 + } else if (instance->closing) {
3582 + vchiq_log_info(vchiq_arm_log_level,
3583 + "service_callback closing");
3584 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
3585 + return VCHIQ_ERROR;
3587 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
3588 + spin_lock(&msg_queue_spinlock);
3591 + user_service->msg_queue[user_service->msg_insert &
3592 + (MSG_QUEUE_SIZE - 1)] = header;
3593 + user_service->msg_insert++;
3594 + spin_unlock(&msg_queue_spinlock);
3596 + up(&user_service->insert_event);
3598 + /* If there is a thread waiting in DEQUEUE_MESSAGE, or if
3599 + ** there is a MESSAGE_AVAILABLE in the completion queue then
3600 + ** bypass the completion queue.
3602 + if (((user_service->message_available_pos -
3603 + instance->completion_remove) >= 0) ||
3604 + user_service->dequeue_pending) {
3605 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
3606 + user_service->dequeue_pending = 0;
3607 + return VCHIQ_SUCCESS;
3612 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
3614 + return add_completion(instance, reason, header, user_service,
3618 +/****************************************************************************
3622 +***************************************************************************/
3625 +vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
3627 + VCHIQ_INSTANCE_T instance = file->private_data;
3628 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
3629 + VCHIQ_SERVICE_T *service = NULL;
3632 + DEBUG_INITIALISE(g_state.local)
3634 + vchiq_log_trace(vchiq_arm_log_level,
3635 + "vchiq_ioctl - instance %x, cmd %s, arg %lx",
3636 + (unsigned int)instance,
3637 + ((_IOC_TYPE(cmd) == VCHIQ_IOC_MAGIC) &&
3638 + (_IOC_NR(cmd) <= VCHIQ_IOC_MAX)) ?
3639 + ioctl_names[_IOC_NR(cmd)] : "<invalid>", arg);
3642 + case VCHIQ_IOC_SHUTDOWN:
3643 + if (!instance->connected)
3646 + /* Remove all services */
3648 + while ((service = next_service_by_instance(instance->state,
3649 + instance, &i)) != NULL) {
3650 + status = vchiq_remove_service(service->handle);
3651 + unlock_service(service);
3652 + if (status != VCHIQ_SUCCESS)
3657 + if (status == VCHIQ_SUCCESS) {
3658 + /* Wake the completion thread and ask it to exit */
3659 + instance->closing = 1;
3660 + up(&instance->insert_event);
3665 + case VCHIQ_IOC_CONNECT:
3666 + if (instance->connected) {
3670 + rc = mutex_lock_interruptible(&instance->state->mutex);
3672 + vchiq_log_error(vchiq_arm_log_level,
3673 + "vchiq: connect: could not lock mutex for "
3675 + instance->state->id, rc);
3679 + status = vchiq_connect_internal(instance->state, instance);
3680 + mutex_unlock(&instance->state->mutex);
3682 + if (status == VCHIQ_SUCCESS)
3683 + instance->connected = 1;
3685 + vchiq_log_error(vchiq_arm_log_level,
3686 + "vchiq: could not connect: %d", status);
3689 + case VCHIQ_IOC_CREATE_SERVICE: {
3690 + VCHIQ_CREATE_SERVICE_T args;
3691 + USER_SERVICE_T *user_service = NULL;
3695 + if (copy_from_user
3696 + (&args, (const void __user *)arg,
3697 + sizeof(args)) != 0) {
3702 + user_service = kmalloc(sizeof(USER_SERVICE_T), GFP_KERNEL);
3703 + if (!user_service) {
3708 + if (args.is_open) {
3709 + if (!instance->connected) {
3711 + kfree(user_service);
3714 + srvstate = VCHIQ_SRVSTATE_OPENING;
3717 + instance->connected ?
3718 + VCHIQ_SRVSTATE_LISTENING :
3719 + VCHIQ_SRVSTATE_HIDDEN;
3722 + userdata = args.params.userdata;
3723 + args.params.callback = service_callback;
3724 + args.params.userdata = user_service;
3725 + service = vchiq_add_service_internal(
3727 + &args.params, srvstate,
3730 + if (service != NULL) {
3731 + user_service->service = service;
3732 + user_service->userdata = userdata;
3733 + user_service->instance = instance;
3734 + user_service->is_vchi = args.is_vchi;
3735 + user_service->dequeue_pending = 0;
3736 + user_service->message_available_pos =
3737 + instance->completion_remove - 1;
3738 + user_service->msg_insert = 0;
3739 + user_service->msg_remove = 0;
3740 + sema_init(&user_service->insert_event, 0);
3741 + sema_init(&user_service->remove_event, 0);
3743 + if (args.is_open) {
3744 + status = vchiq_open_service_internal
3745 + (service, instance->pid);
3746 + if (status != VCHIQ_SUCCESS) {
3747 + vchiq_remove_service(service->handle);
3749 + ret = (status == VCHIQ_RETRY) ?
3751 + user_service->service = NULL;
3752 + user_service->instance = NULL;
3757 + if (copy_to_user((void __user *)
3758 + &(((VCHIQ_CREATE_SERVICE_T __user *)
3760 + (const void *)&service->handle,
3761 + sizeof(service->handle)) != 0) {
3763 + vchiq_remove_service(service->handle);
3764 + kfree(user_service);
3770 + kfree(user_service);
3774 + case VCHIQ_IOC_CLOSE_SERVICE: {
3775 + VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
3777 + service = find_service_for_instance(instance, handle);
3778 + if (service != NULL)
3779 + status = vchiq_close_service(service->handle);
3784 + case VCHIQ_IOC_REMOVE_SERVICE: {
3785 + VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
3787 + service = find_service_for_instance(instance, handle);
3788 + if (service != NULL)
3789 + status = vchiq_remove_service(service->handle);
3794 + case VCHIQ_IOC_USE_SERVICE:
3795 + case VCHIQ_IOC_RELEASE_SERVICE: {
3796 + VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
3798 + service = find_service_for_instance(instance, handle);
3799 + if (service != NULL) {
3800 + status = (cmd == VCHIQ_IOC_USE_SERVICE) ?
3801 + vchiq_use_service_internal(service) :
3802 + vchiq_release_service_internal(service);
3803 + if (status != VCHIQ_SUCCESS) {
3804 + vchiq_log_error(vchiq_susp_log_level,
3805 + "%s: cmd %s returned error %d for "
3806 + "service %c%c%c%c:%03d",
3808 + (cmd == VCHIQ_IOC_USE_SERVICE) ?
3809 + "VCHIQ_IOC_USE_SERVICE" :
3810 + "VCHIQ_IOC_RELEASE_SERVICE",
3812 + VCHIQ_FOURCC_AS_4CHARS(
3813 + service->base.fourcc),
3814 + service->client_id);
3821 + case VCHIQ_IOC_QUEUE_MESSAGE: {
3822 + VCHIQ_QUEUE_MESSAGE_T args;
3823 + if (copy_from_user
3824 + (&args, (const void __user *)arg,
3825 + sizeof(args)) != 0) {
3830 + service = find_service_for_instance(instance, args.handle);
3832 + if ((service != NULL) && (args.count <= MAX_ELEMENTS)) {
3833 + /* Copy elements into kernel space */
3834 + VCHIQ_ELEMENT_T elements[MAX_ELEMENTS];
3835 + if (copy_from_user(elements, args.elements,
3836 + args.count * sizeof(VCHIQ_ELEMENT_T)) == 0)
3837 + status = vchiq_queue_message
3839 + elements, args.count);
3847 + case VCHIQ_IOC_QUEUE_BULK_TRANSMIT:
3848 + case VCHIQ_IOC_QUEUE_BULK_RECEIVE: {
3849 + VCHIQ_QUEUE_BULK_TRANSFER_T args;
3850 + struct bulk_waiter_node *waiter = NULL;
3851 + VCHIQ_BULK_DIR_T dir =
3852 + (cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT) ?
3853 + VCHIQ_BULK_TRANSMIT : VCHIQ_BULK_RECEIVE;
3855 + if (copy_from_user
3856 + (&args, (const void __user *)arg,
3857 + sizeof(args)) != 0) {
3862 + service = find_service_for_instance(instance, args.handle);
3868 + if (args.mode == VCHIQ_BULK_MODE_BLOCKING) {
3869 + waiter = kzalloc(sizeof(struct bulk_waiter_node),
3875 + args.userdata = &waiter->bulk_waiter;
3876 + } else if (args.mode == VCHIQ_BULK_MODE_WAITING) {
3877 + struct list_head *pos;
3878 + mutex_lock(&instance->bulk_waiter_list_mutex);
3879 + list_for_each(pos, &instance->bulk_waiter_list) {
3880 + if (list_entry(pos, struct bulk_waiter_node,
3881 + list)->pid == current->pid) {
3882 + waiter = list_entry(pos,
3883 + struct bulk_waiter_node,
3890 + mutex_unlock(&instance->bulk_waiter_list_mutex);
3892 + vchiq_log_error(vchiq_arm_log_level,
3893 + "no bulk_waiter found for pid %d",
3898 + vchiq_log_info(vchiq_arm_log_level,
3899 + "found bulk_waiter %x for pid %d",
3900 + (unsigned int)waiter, current->pid);
3901 + args.userdata = &waiter->bulk_waiter;
3903 + status = vchiq_bulk_transfer
3905 + VCHI_MEM_HANDLE_INVALID,
3906 + args.data, args.size,
3907 + args.userdata, args.mode,
3911 + if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
3912 + !waiter->bulk_waiter.bulk) {
3913 + if (waiter->bulk_waiter.bulk) {
3914 + /* Cancel the signal when the transfer
3916 + spin_lock(&bulk_waiter_spinlock);
3917 + waiter->bulk_waiter.bulk->userdata = NULL;
3918 + spin_unlock(&bulk_waiter_spinlock);
3922 + const VCHIQ_BULK_MODE_T mode_waiting =
3923 + VCHIQ_BULK_MODE_WAITING;
3924 + waiter->pid = current->pid;
3925 + mutex_lock(&instance->bulk_waiter_list_mutex);
3926 + list_add(&waiter->list, &instance->bulk_waiter_list);
3927 + mutex_unlock(&instance->bulk_waiter_list_mutex);
3928 + vchiq_log_info(vchiq_arm_log_level,
3929 + "saved bulk_waiter %x for pid %d",
3930 + (unsigned int)waiter, current->pid);
3932 + if (copy_to_user((void __user *)
3933 + &(((VCHIQ_QUEUE_BULK_TRANSFER_T __user *)
3935 + (const void *)&mode_waiting,
3936 + sizeof(mode_waiting)) != 0)
3941 + case VCHIQ_IOC_AWAIT_COMPLETION: {
3942 + VCHIQ_AWAIT_COMPLETION_T args;
3944 + DEBUG_TRACE(AWAIT_COMPLETION_LINE);
3945 + if (!instance->connected) {
3950 + if (copy_from_user(&args, (const void __user *)arg,
3951 + sizeof(args)) != 0) {
3956 + mutex_lock(&instance->completion_mutex);
3958 + DEBUG_TRACE(AWAIT_COMPLETION_LINE);
3959 + while ((instance->completion_remove ==
3960 + instance->completion_insert)
3961 + && !instance->closing) {
3963 + DEBUG_TRACE(AWAIT_COMPLETION_LINE);
3964 + mutex_unlock(&instance->completion_mutex);
3965 + rc = down_interruptible(&instance->insert_event);
3966 + mutex_lock(&instance->completion_mutex);
3968 + DEBUG_TRACE(AWAIT_COMPLETION_LINE);
3969 + vchiq_log_info(vchiq_arm_log_level,
3970 + "AWAIT_COMPLETION interrupted");
3975 + DEBUG_TRACE(AWAIT_COMPLETION_LINE);
3977 + /* A read memory barrier is needed to stop prefetch of a stale
3978 + ** completion record
3983 + int msgbufcount = args.msgbufcount;
3984 + for (ret = 0; ret < args.count; ret++) {
3985 + VCHIQ_COMPLETION_DATA_T *completion;
3986 + VCHIQ_SERVICE_T *service;
3987 + USER_SERVICE_T *user_service;
3988 + VCHIQ_HEADER_T *header;
3989 + if (instance->completion_remove ==
3990 + instance->completion_insert)
3992 + completion = &instance->completions[
3993 + instance->completion_remove &
3994 + (MAX_COMPLETIONS - 1)];
3996 + service = completion->service_userdata;
3997 + user_service = service->base.userdata;
3998 + completion->service_userdata =
3999 + user_service->userdata;
4001 + header = completion->header;
4003 + void __user *msgbuf;
4006 + msglen = header->size +
4007 + sizeof(VCHIQ_HEADER_T);
4008 + /* This must be a VCHIQ-style service */
4009 + if (args.msgbufsize < msglen) {
4011 + vchiq_arm_log_level,
4012 + "header %x: msgbufsize"
4013 + " %x < msglen %x",
4014 + (unsigned int)header,
4017 + WARN(1, "invalid message "
4023 + if (msgbufcount <= 0)
4024 + /* Stall here for lack of a
4025 + ** buffer for the message. */
4027 + /* Get the pointer from user space */
4029 + if (copy_from_user(&msgbuf,
4030 + (const void __user *)
4031 + &args.msgbufs[msgbufcount],
4032 + sizeof(msgbuf)) != 0) {
4038 + /* Copy the message to user space */
4039 + if (copy_to_user(msgbuf, header,
4046 + /* Now it has been copied, the message
4047 + ** can be released. */
4048 + vchiq_release_message(service->handle,
4051 + /* The completion must point to the
4053 + completion->header = msgbuf;
4056 + if (completion->reason ==
4057 + VCHIQ_SERVICE_CLOSED) {
4058 + unlock_service(service);
4059 + kfree(user_service);
4062 + if (copy_to_user((void __user *)(
4063 + (size_t)args.buf +
4064 + ret * sizeof(VCHIQ_COMPLETION_DATA_T)),
4066 + sizeof(VCHIQ_COMPLETION_DATA_T)) != 0) {
4072 + instance->completion_remove++;
4075 + if (msgbufcount != args.msgbufcount) {
4076 + if (copy_to_user((void __user *)
4077 + &((VCHIQ_AWAIT_COMPLETION_T *)arg)->
4080 + sizeof(msgbufcount)) != 0) {
4087 + up(&instance->remove_event);
4088 + mutex_unlock(&instance->completion_mutex);
4089 + DEBUG_TRACE(AWAIT_COMPLETION_LINE);
4092 + case VCHIQ_IOC_DEQUEUE_MESSAGE: {
4093 + VCHIQ_DEQUEUE_MESSAGE_T args;
4094 + USER_SERVICE_T *user_service;
4095 + VCHIQ_HEADER_T *header;
4097 + DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
4098 + if (copy_from_user
4099 + (&args, (const void __user *)arg,
4100 + sizeof(args)) != 0) {
4104 + service = find_service_for_instance(instance, args.handle);
4109 + user_service = (USER_SERVICE_T *)service->base.userdata;
4110 + if (user_service->is_vchi == 0) {
4115 + spin_lock(&msg_queue_spinlock);
4116 + if (user_service->msg_remove == user_service->msg_insert) {
4117 + if (!args.blocking) {
4118 + spin_unlock(&msg_queue_spinlock);
4119 + DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
4120 + ret = -EWOULDBLOCK;
4123 + user_service->dequeue_pending = 1;
4125 + spin_unlock(&msg_queue_spinlock);
4126 + DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
4127 + if (down_interruptible(
4128 + &user_service->insert_event) != 0) {
4129 + vchiq_log_info(vchiq_arm_log_level,
4130 + "DEQUEUE_MESSAGE interrupted");
4134 + spin_lock(&msg_queue_spinlock);
4135 + } while (user_service->msg_remove ==
4136 + user_service->msg_insert);
4142 + BUG_ON((int)(user_service->msg_insert -
4143 + user_service->msg_remove) < 0);
4145 + header = user_service->msg_queue[user_service->msg_remove &
4146 + (MSG_QUEUE_SIZE - 1)];
4147 + user_service->msg_remove++;
4148 + spin_unlock(&msg_queue_spinlock);
4150 + up(&user_service->remove_event);
4151 + if (header == NULL)
4153 + else if (header->size <= args.bufsize) {
4154 + /* Copy to user space if msgbuf is not NULL */
4155 + if ((args.buf == NULL) ||
4156 + (copy_to_user((void __user *)args.buf,
4158 + header->size) == 0)) {
4159 + ret = header->size;
4160 + vchiq_release_message(
4166 + vchiq_log_error(vchiq_arm_log_level,
4167 + "header %x: bufsize %x < size %x",
4168 + (unsigned int)header, args.bufsize,
4170 + WARN(1, "invalid size\n");
4173 + DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
4176 + case VCHIQ_IOC_GET_CLIENT_ID: {
4177 + VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
4179 + ret = vchiq_get_client_id(handle);
4182 + case VCHIQ_IOC_GET_CONFIG: {
4183 + VCHIQ_GET_CONFIG_T args;
4184 + VCHIQ_CONFIG_T config;
4186 + if (copy_from_user(&args, (const void __user *)arg,
4187 + sizeof(args)) != 0) {
4191 + if (args.config_size > sizeof(config)) {
4195 + status = vchiq_get_config(instance, args.config_size, &config);
4196 + if (status == VCHIQ_SUCCESS) {
4197 + if (copy_to_user((void __user *)args.pconfig,
4198 + &config, args.config_size) != 0) {
4205 + case VCHIQ_IOC_SET_SERVICE_OPTION: {
4206 + VCHIQ_SET_SERVICE_OPTION_T args;
4208 + if (copy_from_user(
4209 + &args, (const void __user *)arg,
4210 + sizeof(args)) != 0) {
4215 + service = find_service_for_instance(instance, args.handle);
4221 + status = vchiq_set_service_option(
4222 + args.handle, args.option, args.value);
4225 + case VCHIQ_IOC_DUMP_PHYS_MEM: {
4226 + VCHIQ_DUMP_MEM_T args;
4228 + if (copy_from_user
4229 + (&args, (const void __user *)arg,
4230 + sizeof(args)) != 0) {
4234 + dump_phys_mem(args.virt_addr, args.num_bytes);
4243 + unlock_service(service);
4246 + if (status == VCHIQ_ERROR)
4248 + else if (status == VCHIQ_RETRY)
4252 + if ((status == VCHIQ_SUCCESS) && (ret < 0) && (ret != -EINTR) &&
4253 + (ret != -EWOULDBLOCK))
4254 + vchiq_log_info(vchiq_arm_log_level,
4255 + " ioctl instance %lx, cmd %s -> status %d, %ld",
4256 + (unsigned long)instance,
4257 + (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
4258 + ioctl_names[_IOC_NR(cmd)] :
4262 + vchiq_log_trace(vchiq_arm_log_level,
4263 + " ioctl instance %lx, cmd %s -> status %d, %ld",
4264 + (unsigned long)instance,
4265 + (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
4266 + ioctl_names[_IOC_NR(cmd)] :
4273 +/****************************************************************************
4277 +***************************************************************************/
4280 +vchiq_open(struct inode *inode, struct file *file)
4282 + int dev = iminor(inode) & 0x0f;
4283 + vchiq_log_info(vchiq_arm_log_level, "vchiq_open");
4285 + case VCHIQ_MINOR: {
4287 + VCHIQ_STATE_T *state = vchiq_get_state();
4288 + VCHIQ_INSTANCE_T instance;
4291 + vchiq_log_error(vchiq_arm_log_level,
4292 + "vchiq has no connection to VideoCore");
4296 + instance = kzalloc(sizeof(*instance), GFP_KERNEL);
4300 + instance->state = state;
4301 + instance->pid = current->tgid;
4303 + ret = vchiq_proc_add_instance(instance);
4309 + sema_init(&instance->insert_event, 0);
4310 + sema_init(&instance->remove_event, 0);
4311 + mutex_init(&instance->completion_mutex);
4312 + mutex_init(&instance->bulk_waiter_list_mutex);
4313 + INIT_LIST_HEAD(&instance->bulk_waiter_list);
4315 + file->private_data = instance;
4319 + vchiq_log_error(vchiq_arm_log_level,
4320 + "Unknown minor device: %d", dev);
4327 +/****************************************************************************
4331 +***************************************************************************/
4334 +vchiq_release(struct inode *inode, struct file *file)
4336 + int dev = iminor(inode) & 0x0f;
4339 + case VCHIQ_MINOR: {
4340 + VCHIQ_INSTANCE_T instance = file->private_data;
4341 + VCHIQ_STATE_T *state = vchiq_get_state();
4342 + VCHIQ_SERVICE_T *service;
4345 + vchiq_log_info(vchiq_arm_log_level,
4346 + "vchiq_release: instance=%lx",
4347 + (unsigned long)instance);
4354 + /* Ensure videocore is awake to allow termination. */
4355 + vchiq_use_internal(instance->state, NULL,
4358 + mutex_lock(&instance->completion_mutex);
4360 + /* Wake the completion thread and ask it to exit */
4361 + instance->closing = 1;
4362 + up(&instance->insert_event);
4364 + mutex_unlock(&instance->completion_mutex);
4366 + /* Wake the slot handler if the completion queue is full. */
4367 + up(&instance->remove_event);
4369 + /* Mark all services for termination... */
4371 + while ((service = next_service_by_instance(state, instance,
4373 + USER_SERVICE_T *user_service = service->base.userdata;
4375 + /* Wake the slot handler if the msg queue is full. */
4376 + up(&user_service->remove_event);
4378 + vchiq_terminate_service_internal(service);
4379 + unlock_service(service);
4382 + /* ...and wait for them to die */
4384 + while ((service = next_service_by_instance(state, instance, &i))
4386 + USER_SERVICE_T *user_service = service->base.userdata;
4388 + down(&service->remove_event);
4390 + BUG_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);
4392 + spin_lock(&msg_queue_spinlock);
4394 + while (user_service->msg_remove !=
4395 + user_service->msg_insert) {
4396 + VCHIQ_HEADER_T *header = user_service->
4397 + msg_queue[user_service->msg_remove &
4398 + (MSG_QUEUE_SIZE - 1)];
4399 + user_service->msg_remove++;
4400 + spin_unlock(&msg_queue_spinlock);
4403 + vchiq_release_message(
4406 + spin_lock(&msg_queue_spinlock);
4409 + spin_unlock(&msg_queue_spinlock);
4411 + unlock_service(service);
4412 + kfree(user_service);
4415 + /* Release any closed services */
4416 + while (instance->completion_remove !=
4417 + instance->completion_insert) {
4418 + VCHIQ_COMPLETION_DATA_T *completion;
4419 + VCHIQ_SERVICE_T *service;
4420 + completion = &instance->completions[
4421 + instance->completion_remove &
4422 + (MAX_COMPLETIONS - 1)];
4423 + service = completion->service_userdata;
4424 + if (completion->reason == VCHIQ_SERVICE_CLOSED)
4425 + unlock_service(service);
4426 + instance->completion_remove++;
4429 + /* Release the PEER service count. */
4430 + vchiq_release_internal(instance->state, NULL);
4433 + struct list_head *pos, *next;
4434 + list_for_each_safe(pos, next,
4435 + &instance->bulk_waiter_list) {
4436 + struct bulk_waiter_node *waiter;
4437 + waiter = list_entry(pos,
4438 + struct bulk_waiter_node,
4441 + vchiq_log_info(vchiq_arm_log_level,
4442 + "bulk_waiter - cleaned up %x "
4444 + (unsigned int)waiter, waiter->pid);
4449 + vchiq_proc_remove_instance(instance);
4452 + file->private_data = NULL;
4456 + vchiq_log_error(vchiq_arm_log_level,
4457 + "Unknown minor device: %d", dev);
4465 +/****************************************************************************
4469 +***************************************************************************/
4472 +vchiq_dump(void *dump_context, const char *str, int len)
4474 + DUMP_CONTEXT_T *context = (DUMP_CONTEXT_T *)dump_context;
4476 + if (context->actual < context->space) {
4478 + if (context->offset > 0) {
4479 + int skip_bytes = min(len, (int)context->offset);
4480 + str += skip_bytes;
4481 + len -= skip_bytes;
4482 + context->offset -= skip_bytes;
4483 + if (context->offset > 0)
4486 + copy_bytes = min(len, (int)(context->space - context->actual));
4487 + if (copy_bytes == 0)
4489 + if (copy_to_user(context->buf + context->actual, str,
4491 + context->actual = -EFAULT;
4492 + context->actual += copy_bytes;
4493 + len -= copy_bytes;
4495 + /* If the terminating NUL is included in the length, then it
4496 + ** marks the end of a line and should be replaced with a
4497 + ** carriage return. */
4498 + if ((len == 0) && (str[copy_bytes - 1] == '\0')) {
4500 + if (copy_to_user(context->buf + context->actual - 1,
4502 + context->actual = -EFAULT;
4507 +/****************************************************************************
4509 +* vchiq_dump_platform_instance_state
4511 +***************************************************************************/
4514 +vchiq_dump_platform_instances(void *dump_context)
4516 + VCHIQ_STATE_T *state = vchiq_get_state();
4521 + /* There is no list of instances, so instead scan all services,
4522 + marking those that have been dumped. */
4524 + for (i = 0; i < state->unused_service; i++) {
4525 + VCHIQ_SERVICE_T *service = state->services[i];
4526 + VCHIQ_INSTANCE_T instance;
4528 + if (service && (service->base.callback == service_callback)) {
4529 + instance = service->instance;
4531 + instance->mark = 0;
4535 + for (i = 0; i < state->unused_service; i++) {
4536 + VCHIQ_SERVICE_T *service = state->services[i];
4537 + VCHIQ_INSTANCE_T instance;
4539 + if (service && (service->base.callback == service_callback)) {
4540 + instance = service->instance;
4541 + if (instance && !instance->mark) {
4542 + len = snprintf(buf, sizeof(buf),
4543 + "Instance %x: pid %d,%s completions "
4545 + (unsigned int)instance, instance->pid,
4546 + instance->connected ? " connected, " :
4548 + instance->completion_insert -
4549 + instance->completion_remove,
4552 + vchiq_dump(dump_context, buf, len + 1);
4554 + instance->mark = 1;
4560 +/****************************************************************************
4562 +* vchiq_dump_platform_service_state
4564 +***************************************************************************/
4567 +vchiq_dump_platform_service_state(void *dump_context, VCHIQ_SERVICE_T *service)
4569 + USER_SERVICE_T *user_service = (USER_SERVICE_T *)service->base.userdata;
4573 + len = snprintf(buf, sizeof(buf), " instance %x",
4574 + (unsigned int)service->instance);
4576 + if ((service->base.callback == service_callback) &&
4577 + user_service->is_vchi) {
4578 + len += snprintf(buf + len, sizeof(buf) - len,
4579 + ", %d/%d messages",
4580 + user_service->msg_insert - user_service->msg_remove,
4583 + if (user_service->dequeue_pending)
4584 + len += snprintf(buf + len, sizeof(buf) - len,
4585 + " (dequeue pending)");
4588 + vchiq_dump(dump_context, buf, len + 1);
4591 +/****************************************************************************
4595 +***************************************************************************/
4598 +dump_phys_mem(void *virt_addr, uint32_t num_bytes)
4601 + uint8_t *end_virt_addr = virt_addr + num_bytes;
4607 + struct page *page;
4608 + struct page **pages;
4609 + uint8_t *kmapped_virt_ptr;
4611 + /* Align virtAddr and endVirtAddr to 16 byte boundaries. */
4613 + virt_addr = (void *)((unsigned long)virt_addr & ~0x0fuL);
4614 + end_virt_addr = (void *)(((unsigned long)end_virt_addr + 15uL) &
4617 + offset = (int)(long)virt_addr & (PAGE_SIZE - 1);
4618 + end_offset = (int)(long)end_virt_addr & (PAGE_SIZE - 1);
4620 + num_pages = (offset + num_bytes + PAGE_SIZE - 1) / PAGE_SIZE;
4622 + pages = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL);
4623 + if (pages == NULL) {
4624 + vchiq_log_error(vchiq_arm_log_level,
4625 + "Unable to allocation memory for %d pages\n",
4630 + down_read(¤t->mm->mmap_sem);
4631 + rc = get_user_pages(current, /* task */
4632 + current->mm, /* mm */
4633 + (unsigned long)virt_addr, /* start */
4634 + num_pages, /* len */
4637 + pages, /* pages (array of page pointers) */
4639 + up_read(¤t->mm->mmap_sem);
4644 + while (offset < end_offset) {
4646 + int page_offset = offset % PAGE_SIZE;
4647 + page_idx = offset / PAGE_SIZE;
4649 + if (page_idx != prev_idx) {
4653 + page = pages[page_idx];
4654 + kmapped_virt_ptr = kmap(page);
4656 + prev_idx = page_idx;
4659 + if (vchiq_arm_log_level >= VCHIQ_LOG_TRACE)
4660 + vchiq_log_dump_mem("ph",
4661 + (uint32_t)(unsigned long)&kmapped_virt_ptr[
4663 + &kmapped_virt_ptr[page_offset], 16);
4670 + for (page_idx = 0; page_idx < num_pages; page_idx++)
4671 + page_cache_release(pages[page_idx]);
4676 +/****************************************************************************
4680 +***************************************************************************/
4683 +vchiq_read(struct file *file, char __user *buf,
4684 + size_t count, loff_t *ppos)
4686 + DUMP_CONTEXT_T context;
4687 + context.buf = buf;
4688 + context.actual = 0;
4689 + context.space = count;
4690 + context.offset = *ppos;
4692 + vchiq_dump_state(&context, &g_state);
4694 + *ppos += context.actual;
4696 + return context.actual;
4700 +vchiq_get_state(void)
4703 + if (g_state.remote == NULL)
4704 + printk(KERN_ERR "%s: g_state.remote == NULL\n", __func__);
4705 + else if (g_state.remote->initialised != 1)
4706 + printk(KERN_NOTICE "%s: g_state.remote->initialised != 1 (%d)\n",
4707 + __func__, g_state.remote->initialised);
4709 + return ((g_state.remote != NULL) &&
4710 + (g_state.remote->initialised == 1)) ? &g_state : NULL;
4713 +static const struct file_operations
4715 + .owner = THIS_MODULE,
4716 + .unlocked_ioctl = vchiq_ioctl,
4717 + .open = vchiq_open,
4718 + .release = vchiq_release,
4719 + .read = vchiq_read
4723 + * Autosuspend related functionality
4727 +vchiq_videocore_wanted(VCHIQ_STATE_T *state)
4729 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4731 + /* autosuspend not supported - always return wanted */
4733 + else if (arm_state->blocked_count)
4735 + else if (!arm_state->videocore_use_count)
4736 + /* usage count zero - check for override unless we're forcing */
4737 + if (arm_state->resume_blocked)
4740 + return vchiq_platform_videocore_wanted(state);
4742 + /* non-zero usage count - videocore still required */
4746 +static VCHIQ_STATUS_T
4747 +vchiq_keepalive_vchiq_callback(VCHIQ_REASON_T reason,
4748 + VCHIQ_HEADER_T *header,
4749 + VCHIQ_SERVICE_HANDLE_T service_user,
4752 + vchiq_log_error(vchiq_susp_log_level,
4753 + "%s callback reason %d", __func__, reason);
4758 +vchiq_keepalive_thread_func(void *v)
4760 + VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
4761 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4763 + VCHIQ_STATUS_T status;
4764 + VCHIQ_INSTANCE_T instance;
4765 + VCHIQ_SERVICE_HANDLE_T ka_handle;
4767 + VCHIQ_SERVICE_PARAMS_T params = {
4768 + .fourcc = VCHIQ_MAKE_FOURCC('K', 'E', 'E', 'P'),
4769 + .callback = vchiq_keepalive_vchiq_callback,
4770 + .version = KEEPALIVE_VER,
4771 + .version_min = KEEPALIVE_VER_MIN
4774 + status = vchiq_initialise(&instance);
4775 + if (status != VCHIQ_SUCCESS) {
4776 + vchiq_log_error(vchiq_susp_log_level,
4777 + "%s vchiq_initialise failed %d", __func__, status);
4781 + status = vchiq_connect(instance);
4782 + if (status != VCHIQ_SUCCESS) {
4783 + vchiq_log_error(vchiq_susp_log_level,
4784 + "%s vchiq_connect failed %d", __func__, status);
4788 + status = vchiq_add_service(instance, ¶ms, &ka_handle);
4789 + if (status != VCHIQ_SUCCESS) {
4790 + vchiq_log_error(vchiq_susp_log_level,
4791 + "%s vchiq_open_service failed %d", __func__, status);
4796 + long rc = 0, uc = 0;
4797 + if (wait_for_completion_interruptible(&arm_state->ka_evt)
4799 + vchiq_log_error(vchiq_susp_log_level,
4800 + "%s interrupted", __func__);
4801 + flush_signals(current);
4805 + /* read and clear counters. Do release_count then use_count to
4806 + * prevent getting more releases than uses */
4807 + rc = atomic_xchg(&arm_state->ka_release_count, 0);
4808 + uc = atomic_xchg(&arm_state->ka_use_count, 0);
4810 + /* Call use/release service the requisite number of times.
4811 + * Process use before release so use counts don't go negative */
4813 + atomic_inc(&arm_state->ka_use_ack_count);
4814 + status = vchiq_use_service(ka_handle);
4815 + if (status != VCHIQ_SUCCESS) {
4816 + vchiq_log_error(vchiq_susp_log_level,
4817 + "%s vchiq_use_service error %d",
4818 + __func__, status);
4822 + status = vchiq_release_service(ka_handle);
4823 + if (status != VCHIQ_SUCCESS) {
4824 + vchiq_log_error(vchiq_susp_log_level,
4825 + "%s vchiq_release_service error %d",
4826 + __func__, status);
4832 + vchiq_shutdown(instance);
4840 +vchiq_arm_init_state(VCHIQ_STATE_T *state, VCHIQ_ARM_STATE_T *arm_state)
4842 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
4845 + rwlock_init(&arm_state->susp_res_lock);
4847 + init_completion(&arm_state->ka_evt);
4848 + atomic_set(&arm_state->ka_use_count, 0);
4849 + atomic_set(&arm_state->ka_use_ack_count, 0);
4850 + atomic_set(&arm_state->ka_release_count, 0);
4852 + init_completion(&arm_state->vc_suspend_complete);
4854 + init_completion(&arm_state->vc_resume_complete);
4855 + /* Initialise to 'done' state. We only want to block on resume
4856 + * completion while videocore is suspended. */
4857 + set_resume_state(arm_state, VC_RESUME_RESUMED);
4859 + init_completion(&arm_state->resume_blocker);
4860 + /* Initialise to 'done' state. We only want to block on this
4861 + * completion while resume is blocked */
4862 + complete_all(&arm_state->resume_blocker);
4864 + init_completion(&arm_state->blocked_blocker);
4865 + /* Initialise to 'done' state. We only want to block on this
4866 + * completion while things are waiting on the resume blocker */
4867 + complete_all(&arm_state->blocked_blocker);
4869 + arm_state->suspend_timer_timeout = SUSPEND_TIMER_TIMEOUT_MS;
4870 + arm_state->suspend_timer_running = 0;
4871 + init_timer(&arm_state->suspend_timer);
4872 + arm_state->suspend_timer.data = (unsigned long)(state);
4873 + arm_state->suspend_timer.function = suspend_timer_callback;
4875 + arm_state->first_connect = 0;
4882 +** Functions to modify the state variables;
4883 +** set_suspend_state
4884 +** set_resume_state
4886 +** There are more state variables than we might like, so ensure they remain in
4887 +** step. Suspend and resume state are maintained separately, since most of
4888 +** these state machines can operate independently. However, there are a few
4889 +** states where state transitions in one state machine cause a reset to the
4890 +** other state machine. In addition, there are some completion events which
4891 +** need to occur on state machine reset and end-state(s), so these are also
4892 +** dealt with in these functions.
4894 +** In all states we set the state variable according to the input, but in some
4895 +** cases we perform additional steps outlined below;
4897 +** VC_SUSPEND_IDLE - Initialise the suspend completion at the same time.
4898 +** The suspend completion is completed after any suspend
4899 +** attempt. When we reset the state machine we also reset
4900 +** the completion. This reset occurs when videocore is
4901 +** resumed, and also if we initiate suspend after a suspend
4904 +** VC_SUSPEND_IN_PROGRESS - This state is considered the point of no return for
4905 +** suspend - ie from this point on we must try to suspend
4906 +** before resuming can occur. We therefore also reset the
4907 +** resume state machine to VC_RESUME_IDLE in this state.
4909 +** VC_SUSPEND_SUSPENDED - Suspend has completed successfully. Also call
4910 +** complete_all on the suspend completion to notify
4911 +** anything waiting for suspend to happen.
4913 +** VC_SUSPEND_REJECTED - Videocore rejected suspend. Videocore will also
4914 +** initiate resume, so no need to alter resume state.
4915 +** We call complete_all on the suspend completion to notify
4916 +** of suspend rejection.
4918 +** VC_SUSPEND_FAILED - We failed to initiate videocore suspend. We notify the
4919 +** suspend completion and reset the resume state machine.
4921 +** VC_RESUME_IDLE - Initialise the resume completion at the same time. The
4922 +** resume completion is in its 'done' state whenever
4923 +** videocore is running. Therefore, the VC_RESUME_IDLE state
4924 +** implies that videocore is suspended.
4925 +** Hence, any thread which needs to wait until videocore is
4926 +** running can wait on this completion - it will only block
4927 +** if videocore is suspended.
4929 +** VC_RESUME_RESUMED - Resume has completed successfully. Videocore is running.
4930 +** Call complete_all on the resume completion to unblock
4931 +** any threads waiting for resume. Also reset the suspend
4932 +** state machine to its idle state.
4934 +** VC_RESUME_FAILED - Currently unused - no mechanism to fail resume exists.
4938 +set_suspend_state(VCHIQ_ARM_STATE_T *arm_state,
4939 + enum vc_suspend_status new_state)
4941 + /* set the state in all cases */
4942 + arm_state->vc_suspend_state = new_state;
4944 + /* state specific additional actions */
4945 + switch (new_state) {
4946 + case VC_SUSPEND_FORCE_CANCELED:
4947 + complete_all(&arm_state->vc_suspend_complete);
4949 + case VC_SUSPEND_REJECTED:
4950 + complete_all(&arm_state->vc_suspend_complete);
4952 + case VC_SUSPEND_FAILED:
4953 + complete_all(&arm_state->vc_suspend_complete);
4954 + arm_state->vc_resume_state = VC_RESUME_RESUMED;
4955 + complete_all(&arm_state->vc_resume_complete);
4957 + case VC_SUSPEND_IDLE:
4958 + INIT_COMPLETION(arm_state->vc_suspend_complete);
4960 + case VC_SUSPEND_REQUESTED:
4962 + case VC_SUSPEND_IN_PROGRESS:
4963 + set_resume_state(arm_state, VC_RESUME_IDLE);
4965 + case VC_SUSPEND_SUSPENDED:
4966 + complete_all(&arm_state->vc_suspend_complete);
4975 +set_resume_state(VCHIQ_ARM_STATE_T *arm_state,
4976 + enum vc_resume_status new_state)
4978 + /* set the state in all cases */
4979 + arm_state->vc_resume_state = new_state;
4981 + /* state specific additional actions */
4982 + switch (new_state) {
4983 + case VC_RESUME_FAILED:
4985 + case VC_RESUME_IDLE:
4986 + INIT_COMPLETION(arm_state->vc_resume_complete);
4988 + case VC_RESUME_REQUESTED:
4990 + case VC_RESUME_IN_PROGRESS:
4992 + case VC_RESUME_RESUMED:
4993 + complete_all(&arm_state->vc_resume_complete);
4994 + set_suspend_state(arm_state, VC_SUSPEND_IDLE);
5003 +/* should be called with the write lock held */
5005 +start_suspend_timer(VCHIQ_ARM_STATE_T *arm_state)
5007 + del_timer(&arm_state->suspend_timer);
5008 + arm_state->suspend_timer.expires = jiffies +
5009 + msecs_to_jiffies(arm_state->
5010 + suspend_timer_timeout);
5011 + add_timer(&arm_state->suspend_timer);
5012 + arm_state->suspend_timer_running = 1;
5015 +/* should be called with the write lock held */
5017 +stop_suspend_timer(VCHIQ_ARM_STATE_T *arm_state)
5019 + if (arm_state->suspend_timer_running) {
5020 + del_timer(&arm_state->suspend_timer);
5021 + arm_state->suspend_timer_running = 0;
5026 +need_resume(VCHIQ_STATE_T *state)
5028 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
5029 + return (arm_state->vc_suspend_state > VC_SUSPEND_IDLE) &&
5030 + (arm_state->vc_resume_state < VC_RESUME_REQUESTED) &&
5031 + vchiq_videocore_wanted(state);
5035 +block_resume(VCHIQ_ARM_STATE_T *arm_state)
5037 + int status = VCHIQ_SUCCESS;
5038 + const unsigned long timeout_val =
5039 + msecs_to_jiffies(FORCE_SUSPEND_TIMEOUT_MS);
5040 + int resume_count = 0;
5042 + /* Allow any threads which were blocked by the last force suspend to
5043 + * complete if they haven't already. Only give this one shot; if
5044 + * blocked_count is incremented after blocked_blocker is completed
5045 + * (which only happens when blocked_count hits 0) then those threads
5046 + * will have to wait until next time around */
5047 + if (arm_state->blocked_count) {
5048 + INIT_COMPLETION(arm_state->blocked_blocker);
5049 + write_unlock_bh(&arm_state->susp_res_lock);
5050 + vchiq_log_info(vchiq_susp_log_level, "%s wait for previously "
5051 + "blocked clients", __func__);
5052 + if (wait_for_completion_interruptible_timeout(
5053 + &arm_state->blocked_blocker, timeout_val)
5055 + vchiq_log_error(vchiq_susp_log_level, "%s wait for "
5056 + "previously blocked clients failed" , __func__);
5057 + status = VCHIQ_ERROR;
5058 + write_lock_bh(&arm_state->susp_res_lock);
5061 + vchiq_log_info(vchiq_susp_log_level, "%s previously blocked "
5062 + "clients resumed", __func__);
5063 + write_lock_bh(&arm_state->susp_res_lock);
5066 + /* We need to wait for resume to complete if it's in process */
5067 + while (arm_state->vc_resume_state != VC_RESUME_RESUMED &&
5068 + arm_state->vc_resume_state > VC_RESUME_IDLE) {
5069 + if (resume_count > 1) {
5070 + status = VCHIQ_ERROR;
5071 + vchiq_log_error(vchiq_susp_log_level, "%s waited too "
5072 + "many times for resume" , __func__);
5075 + write_unlock_bh(&arm_state->susp_res_lock);
5076 + vchiq_log_info(vchiq_susp_log_level, "%s wait for resume",
5078 + if (wait_for_completion_interruptible_timeout(
5079 + &arm_state->vc_resume_complete, timeout_val)
5081 + vchiq_log_error(vchiq_susp_log_level, "%s wait for "
5082 + "resume failed (%s)", __func__,
5083 + resume_state_names[arm_state->vc_resume_state +
5084 + VC_RESUME_NUM_OFFSET]);
5085 + status = VCHIQ_ERROR;
5086 + write_lock_bh(&arm_state->susp_res_lock);
5089 + vchiq_log_info(vchiq_susp_log_level, "%s resumed", __func__);
5090 + write_lock_bh(&arm_state->susp_res_lock);
5093 + INIT_COMPLETION(arm_state->resume_blocker);
5094 + arm_state->resume_blocked = 1;
5101 +unblock_resume(VCHIQ_ARM_STATE_T *arm_state)
5103 + complete_all(&arm_state->resume_blocker);
5104 + arm_state->resume_blocked = 0;
5107 +/* Initiate suspend via slot handler. Should be called with the write lock
5110 +vchiq_arm_vcsuspend(VCHIQ_STATE_T *state)
5112 + VCHIQ_STATUS_T status = VCHIQ_ERROR;
5113 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
5118 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
5119 + status = VCHIQ_SUCCESS;
5122 + switch (arm_state->vc_suspend_state) {
5123 + case VC_SUSPEND_REQUESTED:
5124 + vchiq_log_info(vchiq_susp_log_level, "%s: suspend already "
5125 + "requested", __func__);
5127 + case VC_SUSPEND_IN_PROGRESS:
5128 + vchiq_log_info(vchiq_susp_log_level, "%s: suspend already in "
5129 + "progress", __func__);
5133 + /* We don't expect to be in other states, so log but continue
5135 + vchiq_log_error(vchiq_susp_log_level,
5136 + "%s unexpected suspend state %s", __func__,
5137 + suspend_state_names[arm_state->vc_suspend_state +
5138 + VC_SUSPEND_NUM_OFFSET]);
5139 + /* fall through */
5140 + case VC_SUSPEND_REJECTED:
5141 + case VC_SUSPEND_FAILED:
5142 + /* Ensure any idle state actions have been run */
5143 + set_suspend_state(arm_state, VC_SUSPEND_IDLE);
5144 + /* fall through */
5145 + case VC_SUSPEND_IDLE:
5146 + vchiq_log_info(vchiq_susp_log_level,
5147 + "%s: suspending", __func__);
5148 + set_suspend_state(arm_state, VC_SUSPEND_REQUESTED);
5149 + /* kick the slot handler thread to initiate suspend */
5150 + request_poll(state, NULL, 0);
5155 + vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, status);
5160 +vchiq_platform_check_suspend(VCHIQ_STATE_T *state)
5162 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
5168 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
5170 + write_lock_bh(&arm_state->susp_res_lock);
5171 + if (arm_state->vc_suspend_state == VC_SUSPEND_REQUESTED &&
5172 + arm_state->vc_resume_state == VC_RESUME_RESUMED) {
5173 + set_suspend_state(arm_state, VC_SUSPEND_IN_PROGRESS);
5176 + write_unlock_bh(&arm_state->susp_res_lock);
5179 + vchiq_platform_suspend(state);
5182 + vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
5188 +output_timeout_error(VCHIQ_STATE_T *state)
5190 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
5191 + char service_err[50] = "";
5192 + int vc_use_count = arm_state->videocore_use_count;
5193 + int active_services = state->unused_service;
5196 + if (!arm_state->videocore_use_count) {
5197 + snprintf(service_err, 50, " Videocore usecount is 0");
5200 + for (i = 0; i < active_services; i++) {
5201 + VCHIQ_SERVICE_T *service_ptr = state->services[i];
5202 + if (service_ptr && service_ptr->service_use_count &&
5203 + (service_ptr->srvstate != VCHIQ_SRVSTATE_FREE)) {
5204 + snprintf(service_err, 50, " %c%c%c%c(%d) service has "
5205 + "use count %d%s", VCHIQ_FOURCC_AS_4CHARS(
5206 + service_ptr->base.fourcc),
5207 + service_ptr->client_id,
5208 + service_ptr->service_use_count,
5209 + service_ptr->service_use_count ==
5210 + vc_use_count ? "" : " (+ more)");
5216 + vchiq_log_error(vchiq_susp_log_level,
5217 + "timed out waiting for vc suspend (%d).%s",
5218 + arm_state->autosuspend_override, service_err);
5222 +/* Try to get videocore into suspended state, regardless of autosuspend state.
5223 +** We don't actually force suspend, since videocore may get into a bad state
5224 +** if we force suspend at a bad time. Instead, we wait for autosuspend to
5225 +** determine a good point to suspend. If this doesn't happen within 100ms we
5228 +** Returns VCHIQ_SUCCESS if videocore suspended successfully, VCHIQ_RETRY if
5229 +** videocore failed to suspend in time or VCHIQ_ERROR if interrupted.
5232 +vchiq_arm_force_suspend(VCHIQ_STATE_T *state)
5234 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
5235 + VCHIQ_STATUS_T status = VCHIQ_ERROR;
5242 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
5244 + write_lock_bh(&arm_state->susp_res_lock);
5246 + status = block_resume(arm_state);
5247 + if (status != VCHIQ_SUCCESS)
5249 + if (arm_state->vc_suspend_state == VC_SUSPEND_SUSPENDED) {
5250 + /* Already suspended - just block resume and exit */
5251 + vchiq_log_info(vchiq_susp_log_level, "%s already suspended",
5253 + status = VCHIQ_SUCCESS;
5255 + } else if (arm_state->vc_suspend_state <= VC_SUSPEND_IDLE) {
5256 + /* initiate suspend immediately in the case that we're waiting
5257 + * for the timeout */
5258 + stop_suspend_timer(arm_state);
5259 + if (!vchiq_videocore_wanted(state)) {
5260 + vchiq_log_info(vchiq_susp_log_level, "%s videocore "
5261 + "idle, initiating suspend", __func__);
5262 + status = vchiq_arm_vcsuspend(state);
5263 + } else if (arm_state->autosuspend_override <
5264 + FORCE_SUSPEND_FAIL_MAX) {
5265 + vchiq_log_info(vchiq_susp_log_level, "%s letting "
5266 + "videocore go idle", __func__);
5267 + status = VCHIQ_SUCCESS;
5269 + vchiq_log_warning(vchiq_susp_log_level, "%s failed too "
5270 + "many times - attempting suspend", __func__);
5271 + status = vchiq_arm_vcsuspend(state);
5274 + vchiq_log_info(vchiq_susp_log_level, "%s videocore suspend "
5275 + "in progress - wait for completion", __func__);
5276 + status = VCHIQ_SUCCESS;
5279 + /* Wait for suspend to happen due to system idle (not forced..) */
5280 + if (status != VCHIQ_SUCCESS)
5281 + goto unblock_resume;
5284 + write_unlock_bh(&arm_state->susp_res_lock);
5286 + rc = wait_for_completion_interruptible_timeout(
5287 + &arm_state->vc_suspend_complete,
5288 + msecs_to_jiffies(FORCE_SUSPEND_TIMEOUT_MS));
5290 + write_lock_bh(&arm_state->susp_res_lock);
5292 + vchiq_log_warning(vchiq_susp_log_level, "%s "
5293 + "interrupted waiting for suspend", __func__);
5294 + status = VCHIQ_ERROR;
5295 + goto unblock_resume;
5296 + } else if (rc == 0) {
5297 + if (arm_state->vc_suspend_state > VC_SUSPEND_IDLE) {
5298 + /* Repeat timeout once if in progress */
5304 + arm_state->autosuspend_override++;
5305 + output_timeout_error(state);
5307 + status = VCHIQ_RETRY;
5308 + goto unblock_resume;
5310 + } while (0 < (repeat--));
5312 + /* Check and report state in case we need to abort ARM suspend */
5313 + if (arm_state->vc_suspend_state != VC_SUSPEND_SUSPENDED) {
5314 + status = VCHIQ_RETRY;
5315 + vchiq_log_error(vchiq_susp_log_level,
5316 + "%s videocore suspend failed (state %s)", __func__,
5317 + suspend_state_names[arm_state->vc_suspend_state +
5318 + VC_SUSPEND_NUM_OFFSET]);
5319 + /* Reset the state only if it's still in an error state.
5320 + * Something could have already initiated another suspend. */
5321 + if (arm_state->vc_suspend_state < VC_SUSPEND_IDLE)
5322 + set_suspend_state(arm_state, VC_SUSPEND_IDLE);
5324 + goto unblock_resume;
5327 + /* successfully suspended - unlock and exit */
5331 + /* all error states need to unblock resume before exit */
5332 + unblock_resume(arm_state);
5335 + write_unlock_bh(&arm_state->susp_res_lock);
5338 + vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, status);
5343 +vchiq_check_suspend(VCHIQ_STATE_T *state)
5345 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
5350 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
5352 + write_lock_bh(&arm_state->susp_res_lock);
5353 + if (arm_state->vc_suspend_state != VC_SUSPEND_SUSPENDED &&
5354 + arm_state->first_connect &&
5355 + !vchiq_videocore_wanted(state)) {
5356 + vchiq_arm_vcsuspend(state);
5358 + write_unlock_bh(&arm_state->susp_res_lock);
5361 + vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
5367 +vchiq_arm_allow_resume(VCHIQ_STATE_T *state)
5369 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
5376 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
5378 + write_lock_bh(&arm_state->susp_res_lock);
5379 + unblock_resume(arm_state);
5380 + resume = vchiq_check_resume(state);
5381 + write_unlock_bh(&arm_state->susp_res_lock);
5384 + if (wait_for_completion_interruptible(
5385 + &arm_state->vc_resume_complete) < 0) {
5386 + vchiq_log_error(vchiq_susp_log_level,
5387 + "%s interrupted", __func__);
5388 + /* failed, cannot accurately derive suspend
5389 + * state, so exit early. */
5394 + read_lock_bh(&arm_state->susp_res_lock);
5395 + if (arm_state->vc_suspend_state == VC_SUSPEND_SUSPENDED) {
5396 + vchiq_log_info(vchiq_susp_log_level,
5397 + "%s: Videocore remains suspended", __func__);
5399 + vchiq_log_info(vchiq_susp_log_level,
5400 + "%s: Videocore resumed", __func__);
5403 + read_unlock_bh(&arm_state->susp_res_lock);
5405 + vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
5409 +/* This function should be called with the write lock held */
5411 +vchiq_check_resume(VCHIQ_STATE_T *state)
5413 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
5419 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
5421 + if (need_resume(state)) {
5422 + set_resume_state(arm_state, VC_RESUME_REQUESTED);
5423 + request_poll(state, NULL, 0);
5428 + vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
5433 +vchiq_platform_check_resume(VCHIQ_STATE_T *state)
5435 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
5441 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
5443 + write_lock_bh(&arm_state->susp_res_lock);
5444 + if (arm_state->wake_address == 0) {
5445 + vchiq_log_info(vchiq_susp_log_level,
5446 + "%s: already awake", __func__);
5449 + if (arm_state->vc_resume_state == VC_RESUME_IN_PROGRESS) {
5450 + vchiq_log_info(vchiq_susp_log_level,
5451 + "%s: already resuming", __func__);
5455 + if (arm_state->vc_resume_state == VC_RESUME_REQUESTED) {
5456 + set_resume_state(arm_state, VC_RESUME_IN_PROGRESS);
5459 + vchiq_log_trace(vchiq_susp_log_level,
5460 + "%s: not resuming (resume state %s)", __func__,
5461 + resume_state_names[arm_state->vc_resume_state +
5462 + VC_RESUME_NUM_OFFSET]);
5465 + write_unlock_bh(&arm_state->susp_res_lock);
5468 + vchiq_platform_resume(state);
5471 + vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
5479 +vchiq_use_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
5480 + enum USE_TYPE_E use_type)
5482 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
5483 + VCHIQ_STATUS_T ret = VCHIQ_SUCCESS;
5486 + int local_uc, local_entity_uc;
5491 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
5493 + if (use_type == USE_TYPE_VCHIQ) {
5494 + sprintf(entity, "VCHIQ: ");
5495 + entity_uc = &arm_state->peer_use_count;
5496 + } else if (service) {
5497 + sprintf(entity, "%c%c%c%c:%03d",
5498 + VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
5499 + service->client_id);
5500 + entity_uc = &service->service_use_count;
5502 + vchiq_log_error(vchiq_susp_log_level, "%s null service "
5504 + ret = VCHIQ_ERROR;
5508 + write_lock_bh(&arm_state->susp_res_lock);
5509 + while (arm_state->resume_blocked) {
5510 + /* If we call 'use' while force suspend is waiting for suspend,
5511 + * then we're about to block the thread which the force is
5512 + * waiting to complete, so we're bound to just time out. In this
5513 + * case, set the suspend state such that the wait will be
5514 + * canceled, so we can complete as quickly as possible. */
5515 + if (arm_state->resume_blocked && arm_state->vc_suspend_state ==
5516 + VC_SUSPEND_IDLE) {
5517 + set_suspend_state(arm_state, VC_SUSPEND_FORCE_CANCELED);
5520 + /* If suspend is already in progress then we need to block */
5521 + if (!try_wait_for_completion(&arm_state->resume_blocker)) {
5522 + /* Indicate that there are threads waiting on the resume
5523 + * blocker. These need to be allowed to complete before
5524 + * a _second_ call to force suspend can complete,
5525 + * otherwise low priority threads might never actually
5527 + arm_state->blocked_count++;
5528 + write_unlock_bh(&arm_state->susp_res_lock);
5529 + vchiq_log_info(vchiq_susp_log_level, "%s %s resume "
5530 + "blocked - waiting...", __func__, entity);
5531 + if (wait_for_completion_killable(
5532 + &arm_state->resume_blocker) != 0) {
5533 + vchiq_log_error(vchiq_susp_log_level, "%s %s "
5534 + "wait for resume blocker interrupted",
5535 + __func__, entity);
5536 + ret = VCHIQ_ERROR;
5537 + write_lock_bh(&arm_state->susp_res_lock);
5538 + arm_state->blocked_count--;
5539 + write_unlock_bh(&arm_state->susp_res_lock);
5542 + vchiq_log_info(vchiq_susp_log_level, "%s %s resume "
5543 + "unblocked", __func__, entity);
5544 + write_lock_bh(&arm_state->susp_res_lock);
5545 + if (--arm_state->blocked_count == 0)
5546 + complete_all(&arm_state->blocked_blocker);
5550 + stop_suspend_timer(arm_state);
5552 + local_uc = ++arm_state->videocore_use_count;
5553 + local_entity_uc = ++(*entity_uc);
5555 + /* If there's a pending request which hasn't yet been serviced then
5556 + * just clear it. If we're past VC_SUSPEND_REQUESTED state then
5557 + * vc_resume_complete will block until we either resume or fail to
5559 + if (arm_state->vc_suspend_state <= VC_SUSPEND_REQUESTED)
5560 + set_suspend_state(arm_state, VC_SUSPEND_IDLE);
5562 + if ((use_type != USE_TYPE_SERVICE_NO_RESUME) && need_resume(state)) {
5563 + set_resume_state(arm_state, VC_RESUME_REQUESTED);
5564 + vchiq_log_info(vchiq_susp_log_level,
5565 + "%s %s count %d, state count %d",
5566 + __func__, entity, local_entity_uc, local_uc);
5567 + request_poll(state, NULL, 0);
5569 + vchiq_log_trace(vchiq_susp_log_level,
5570 + "%s %s count %d, state count %d",
5571 + __func__, entity, *entity_uc, local_uc);
5574 + write_unlock_bh(&arm_state->susp_res_lock);
5576 + /* Completion is in a done state when we're not suspended, so this won't
5577 + * block for the non-suspended case. */
5578 + if (!try_wait_for_completion(&arm_state->vc_resume_complete)) {
5579 + vchiq_log_info(vchiq_susp_log_level, "%s %s wait for resume",
5580 + __func__, entity);
5581 + if (wait_for_completion_killable(
5582 + &arm_state->vc_resume_complete) != 0) {
5583 + vchiq_log_error(vchiq_susp_log_level, "%s %s wait for "
5584 + "resume interrupted", __func__, entity);
5585 + ret = VCHIQ_ERROR;
5588 + vchiq_log_info(vchiq_susp_log_level, "%s %s resumed", __func__,
5592 + if (ret == VCHIQ_SUCCESS) {
5593 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
5594 + long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0);
5595 + while (ack_cnt && (status == VCHIQ_SUCCESS)) {
5596 + /* Send the use notify to videocore */
5597 + status = vchiq_send_remote_use_active(state);
5598 + if (status == VCHIQ_SUCCESS)
5601 + atomic_add(ack_cnt,
5602 + &arm_state->ka_use_ack_count);
5607 + vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
5612 +vchiq_release_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service)
5614 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
5615 + VCHIQ_STATUS_T ret = VCHIQ_SUCCESS;
5618 + int local_uc, local_entity_uc;
5623 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
5626 + sprintf(entity, "%c%c%c%c:%03d",
5627 + VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
5628 + service->client_id);
5629 + entity_uc = &service->service_use_count;
5631 + sprintf(entity, "PEER: ");
5632 + entity_uc = &arm_state->peer_use_count;
5635 + write_lock_bh(&arm_state->susp_res_lock);
5636 + if (!arm_state->videocore_use_count || !(*entity_uc)) {
5637 + /* Don't use BUG_ON - don't allow user thread to crash kernel */
5638 + WARN_ON(!arm_state->videocore_use_count);
5639 + WARN_ON(!(*entity_uc));
5640 + ret = VCHIQ_ERROR;
5643 + local_uc = --arm_state->videocore_use_count;
5644 + local_entity_uc = --(*entity_uc);
5646 + if (!vchiq_videocore_wanted(state)) {
5647 + if (vchiq_platform_use_suspend_timer() &&
5648 + !arm_state->resume_blocked) {
5649 + /* Only use the timer if we're not trying to force
5650 + * suspend (=> resume_blocked) */
5651 + start_suspend_timer(arm_state);
5653 + vchiq_log_info(vchiq_susp_log_level,
5654 + "%s %s count %d, state count %d - suspending",
5655 + __func__, entity, *entity_uc,
5656 + arm_state->videocore_use_count);
5657 + vchiq_arm_vcsuspend(state);
5660 + vchiq_log_trace(vchiq_susp_log_level,
5661 + "%s %s count %d, state count %d",
5662 + __func__, entity, *entity_uc,
5663 + arm_state->videocore_use_count);
5666 + write_unlock_bh(&arm_state->susp_res_lock);
5669 + vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
5674 +vchiq_on_remote_use(VCHIQ_STATE_T *state)
5676 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
5677 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
5678 + atomic_inc(&arm_state->ka_use_count);
5679 + complete(&arm_state->ka_evt);
5683 +vchiq_on_remote_release(VCHIQ_STATE_T *state)
5685 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
5686 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
5687 + atomic_inc(&arm_state->ka_release_count);
5688 + complete(&arm_state->ka_evt);
5692 +vchiq_use_service_internal(VCHIQ_SERVICE_T *service)
5694 + return vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
5698 +vchiq_release_service_internal(VCHIQ_SERVICE_T *service)
5700 + return vchiq_release_internal(service->state, service);
5703 +static void suspend_timer_callback(unsigned long context)
5705 + VCHIQ_STATE_T *state = (VCHIQ_STATE_T *)context;
5706 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
5709 + vchiq_log_info(vchiq_susp_log_level,
5710 + "%s - suspend timer expired - check suspend", __func__);
5711 + vchiq_check_suspend(state);
5717 +vchiq_use_service_no_resume(VCHIQ_SERVICE_HANDLE_T handle)
5719 + VCHIQ_STATUS_T ret = VCHIQ_ERROR;
5720 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
5722 + ret = vchiq_use_internal(service->state, service,
5723 + USE_TYPE_SERVICE_NO_RESUME);
5724 + unlock_service(service);
5730 +vchiq_use_service(VCHIQ_SERVICE_HANDLE_T handle)
5732 + VCHIQ_STATUS_T ret = VCHIQ_ERROR;
5733 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
5735 + ret = vchiq_use_internal(service->state, service,
5736 + USE_TYPE_SERVICE);
5737 + unlock_service(service);
5743 +vchiq_release_service(VCHIQ_SERVICE_HANDLE_T handle)
5745 + VCHIQ_STATUS_T ret = VCHIQ_ERROR;
5746 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
5748 + ret = vchiq_release_internal(service->state, service);
5749 + unlock_service(service);
5755 +vchiq_dump_service_use_state(VCHIQ_STATE_T *state)
5757 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
5759 + /* Only dump 64 services */
5760 + static const int local_max_services = 64;
5761 + /* If there's more than 64 services, only dump ones with
5762 + * non-zero counts */
5763 + int only_nonzero = 0;
5764 + static const char *nz = "<-- preventing suspend";
5766 + enum vc_suspend_status vc_suspend_state;
5767 + enum vc_resume_status vc_resume_state;
5770 + int active_services;
5771 + struct service_data_struct {
5775 + } service_data[local_max_services];
5780 + read_lock_bh(&arm_state->susp_res_lock);
5781 + vc_suspend_state = arm_state->vc_suspend_state;
5782 + vc_resume_state = arm_state->vc_resume_state;
5783 + peer_count = arm_state->peer_use_count;
5784 + vc_use_count = arm_state->videocore_use_count;
5785 + active_services = state->unused_service;
5786 + if (active_services > local_max_services)
5789 + for (i = 0; (i < active_services) && (j < local_max_services); i++) {
5790 + VCHIQ_SERVICE_T *service_ptr = state->services[i];
5794 + if (only_nonzero && !service_ptr->service_use_count)
5797 + if (service_ptr->srvstate != VCHIQ_SRVSTATE_FREE) {
5798 + service_data[j].fourcc = service_ptr->base.fourcc;
5799 + service_data[j].clientid = service_ptr->client_id;
5800 + service_data[j++].use_count = service_ptr->
5801 + service_use_count;
5805 + read_unlock_bh(&arm_state->susp_res_lock);
5807 + vchiq_log_warning(vchiq_susp_log_level,
5808 + "-- Videcore suspend state: %s --",
5809 + suspend_state_names[vc_suspend_state + VC_SUSPEND_NUM_OFFSET]);
5810 + vchiq_log_warning(vchiq_susp_log_level,
5811 + "-- Videcore resume state: %s --",
5812 + resume_state_names[vc_resume_state + VC_RESUME_NUM_OFFSET]);
5815 + vchiq_log_warning(vchiq_susp_log_level, "Too many active "
5816 + "services (%d). Only dumping up to first %d services "
5817 + "with non-zero use-count", active_services,
5818 + local_max_services);
5820 + for (i = 0; i < j; i++) {
5821 + vchiq_log_warning(vchiq_susp_log_level,
5822 + "----- %c%c%c%c:%d service count %d %s",
5823 + VCHIQ_FOURCC_AS_4CHARS(service_data[i].fourcc),
5824 + service_data[i].clientid,
5825 + service_data[i].use_count,
5826 + service_data[i].use_count ? nz : "");
5828 + vchiq_log_warning(vchiq_susp_log_level,
5829 + "----- VCHIQ use count count %d", peer_count);
5830 + vchiq_log_warning(vchiq_susp_log_level,
5831 + "--- Overall vchiq instance use count %d", vc_use_count);
5833 + vchiq_dump_platform_use_state(state);
5837 +vchiq_check_service(VCHIQ_SERVICE_T *service)
5839 + VCHIQ_ARM_STATE_T *arm_state;
5840 + VCHIQ_STATUS_T ret = VCHIQ_ERROR;
5842 + if (!service || !service->state)
5845 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
5847 + arm_state = vchiq_platform_get_arm_state(service->state);
5849 + read_lock_bh(&arm_state->susp_res_lock);
5850 + if (service->service_use_count)
5851 + ret = VCHIQ_SUCCESS;
5852 + read_unlock_bh(&arm_state->susp_res_lock);
5854 + if (ret == VCHIQ_ERROR) {
5855 + vchiq_log_error(vchiq_susp_log_level,
5856 + "%s ERROR - %c%c%c%c:%d service count %d, "
5857 + "state count %d, videocore suspend state %s", __func__,
5858 + VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
5859 + service->client_id, service->service_use_count,
5860 + arm_state->videocore_use_count,
5861 + suspend_state_names[arm_state->vc_suspend_state +
5862 + VC_SUSPEND_NUM_OFFSET]);
5863 + vchiq_dump_service_use_state(service->state);
5869 +/* stub functions */
5870 +void vchiq_on_remote_use_active(VCHIQ_STATE_T *state)
5875 +void vchiq_platform_conn_state_changed(VCHIQ_STATE_T *state,
5876 + VCHIQ_CONNSTATE_T oldstate, VCHIQ_CONNSTATE_T newstate)
5878 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
5879 + vchiq_log_info(vchiq_susp_log_level, "%d: %s->%s", state->id,
5880 + get_conn_state_name(oldstate), get_conn_state_name(newstate));
5881 + if (state->conn_state == VCHIQ_CONNSTATE_CONNECTED) {
5882 + write_lock_bh(&arm_state->susp_res_lock);
5883 + if (!arm_state->first_connect) {
5884 + char threadname[10];
5885 + arm_state->first_connect = 1;
5886 + write_unlock_bh(&arm_state->susp_res_lock);
5887 + snprintf(threadname, sizeof(threadname), "VCHIQka-%d",
5889 + arm_state->ka_thread = kthread_create(
5890 + &vchiq_keepalive_thread_func,
5893 + if (arm_state->ka_thread == NULL) {
5894 + vchiq_log_error(vchiq_susp_log_level,
5895 + "vchiq: FATAL: couldn't create thread %s",
5898 + wake_up_process(arm_state->ka_thread);
5901 + write_unlock_bh(&arm_state->susp_res_lock);
5906 +/****************************************************************************
5908 +* vchiq_init - called when the module is loaded.
5910 +***************************************************************************/
5918 + /* create proc entries */
5919 + err = vchiq_proc_init();
5921 + goto failed_proc_init;
5923 + err = alloc_chrdev_region(&vchiq_devid, VCHIQ_MINOR, 1, DEVICE_NAME);
5925 + vchiq_log_error(vchiq_arm_log_level,
5926 + "Unable to allocate device number");
5927 + goto failed_alloc_chrdev;
5929 + cdev_init(&vchiq_cdev, &vchiq_fops);
5930 + vchiq_cdev.owner = THIS_MODULE;
5931 + err = cdev_add(&vchiq_cdev, vchiq_devid, 1);
5933 + vchiq_log_error(vchiq_arm_log_level,
5934 + "Unable to register device");
5935 + goto failed_cdev_add;
5938 + /* create sysfs entries */
5939 + vchiq_class = class_create(THIS_MODULE, DEVICE_NAME);
5940 + ptr_err = vchiq_class;
5941 + if (IS_ERR(ptr_err))
5942 + goto failed_class_create;
5944 + vchiq_dev = device_create(vchiq_class, NULL,
5945 + vchiq_devid, NULL, "vchiq");
5946 + ptr_err = vchiq_dev;
5947 + if (IS_ERR(ptr_err))
5948 + goto failed_device_create;
5950 + err = vchiq_platform_init(&g_state);
5952 + goto failed_platform_init;
5954 + vchiq_log_info(vchiq_arm_log_level,
5955 + "vchiq: initialised - version %d (min %d), device %d.%d",
5956 + VCHIQ_VERSION, VCHIQ_VERSION_MIN,
5957 + MAJOR(vchiq_devid), MINOR(vchiq_devid));
5961 +failed_platform_init:
5962 + device_destroy(vchiq_class, vchiq_devid);
5963 +failed_device_create:
5964 + class_destroy(vchiq_class);
5965 +failed_class_create:
5966 + cdev_del(&vchiq_cdev);
5967 + err = PTR_ERR(ptr_err);
5969 + unregister_chrdev_region(vchiq_devid, 1);
5970 +failed_alloc_chrdev:
5971 + vchiq_proc_deinit();
5973 + vchiq_log_warning(vchiq_arm_log_level, "could not load vchiq");
5977 +static int vchiq_instance_get_use_count(VCHIQ_INSTANCE_T instance)
5979 + VCHIQ_SERVICE_T *service;
5980 + int use_count = 0, i;
5982 + while ((service = next_service_by_instance(instance->state,
5983 + instance, &i)) != NULL) {
5984 + use_count += service->service_use_count;
5985 + unlock_service(service);
5990 +/* read the per-process use-count */
5991 +static int proc_read_use_count(char *page, char **start,
5992 + off_t off, int count,
5993 + int *eof, void *data)
5995 + VCHIQ_INSTANCE_T instance = data;
5996 + int len, use_count;
5998 + use_count = vchiq_instance_get_use_count(instance);
5999 + len = snprintf(page+off, count, "%d\n", use_count);
6004 +/* add an instance (process) to the proc entries */
6005 +static int vchiq_proc_add_instance(VCHIQ_INSTANCE_T instance)
6008 + struct proc_dir_entry *top, *use_count;
6009 + struct proc_dir_entry *clients = vchiq_clients_top();
6010 + int pid = instance->pid;
6012 + snprintf(pidstr, sizeof(pidstr), "%d", pid);
6013 + top = proc_mkdir(pidstr, clients);
6017 + use_count = create_proc_read_entry("use_count",
6019 + proc_read_use_count,
6022 + goto fail_use_count;
6024 + instance->proc_entry = top;
6030 + remove_proc_entry(top->name, clients);
6036 +static void vchiq_proc_remove_instance(VCHIQ_INSTANCE_T instance)
6039 + struct proc_dir_entry *clients = vchiq_clients_top();
6040 + remove_proc_entry("use_count", instance->proc_entry);
6041 + remove_proc_entry(instance->proc_entry->name, clients);
6045 +/****************************************************************************
6047 +* vchiq_exit - called when the module is unloaded.
6049 +***************************************************************************/
6054 + vchiq_platform_exit(&g_state);
6055 + device_destroy(vchiq_class, vchiq_devid);
6056 + class_destroy(vchiq_class);
6057 + cdev_del(&vchiq_cdev);
6058 + unregister_chrdev_region(vchiq_devid, 1);
6061 +module_init(vchiq_init);
6062 +module_exit(vchiq_exit);
6063 +MODULE_LICENSE("GPL");
6064 +MODULE_AUTHOR("Broadcom Corporation");
6065 diff -urN linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.h linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.h
6066 --- linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.h 1970-01-01 01:00:00.000000000 +0100
6067 +++ linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.h 2013-07-06 15:25:50.000000000 +0100
6070 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
6072 + * Redistribution and use in source and binary forms, with or without
6073 + * modification, are permitted provided that the following conditions
6075 + * 1. Redistributions of source code must retain the above copyright
6076 + * notice, this list of conditions, and the following disclaimer,
6077 + * without modification.
6078 + * 2. Redistributions in binary form must reproduce the above copyright
6079 + * notice, this list of conditions and the following disclaimer in the
6080 + * documentation and/or other materials provided with the distribution.
6081 + * 3. The names of the above-listed copyright holders may not be used
6082 + * to endorse or promote products derived from this software without
6083 + * specific prior written permission.
6085 + * ALTERNATIVELY, this software may be distributed under the terms of the
6086 + * GNU General Public License ("GPL") version 2, as published by the Free
6087 + * Software Foundation.
6089 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
6090 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
6091 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
6092 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
6093 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
6094 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
6095 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
6096 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
6097 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
6098 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
6099 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
6102 +#ifndef VCHIQ_ARM_H
6103 +#define VCHIQ_ARM_H
6105 +#include <linux/mutex.h>
6106 +#include <linux/semaphore.h>
6107 +#include <linux/atomic.h>
6108 +#include "vchiq_core.h"
6111 +enum vc_suspend_status {
6112 + VC_SUSPEND_FORCE_CANCELED = -3, /* Force suspend canceled, too busy */
6113 + VC_SUSPEND_REJECTED = -2, /* Videocore rejected suspend request */
6114 + VC_SUSPEND_FAILED = -1, /* Videocore suspend failed */
6115 + VC_SUSPEND_IDLE = 0, /* VC active, no suspend actions */
6116 + VC_SUSPEND_REQUESTED, /* User has requested suspend */
6117 + VC_SUSPEND_IN_PROGRESS, /* Slot handler has recvd suspend request */
6118 + VC_SUSPEND_SUSPENDED /* Videocore suspend succeeded */
6121 +enum vc_resume_status {
6122 + VC_RESUME_FAILED = -1, /* Videocore resume failed */
6123 + VC_RESUME_IDLE = 0, /* VC suspended, no resume actions */
6124 + VC_RESUME_REQUESTED, /* User has requested resume */
6125 + VC_RESUME_IN_PROGRESS, /* Slot handler has received resume request */
6126 + VC_RESUME_RESUMED /* Videocore resumed successfully (active) */
6132 + USE_TYPE_SERVICE_NO_RESUME,
6138 +typedef struct vchiq_arm_state_struct {
6139 + /* Keepalive-related data */
6140 + struct task_struct *ka_thread;
6141 + struct completion ka_evt;
6142 + atomic_t ka_use_count;
6143 + atomic_t ka_use_ack_count;
6144 + atomic_t ka_release_count;
6146 + struct completion vc_suspend_complete;
6147 + struct completion vc_resume_complete;
6149 + rwlock_t susp_res_lock;
6150 + enum vc_suspend_status vc_suspend_state;
6151 + enum vc_resume_status vc_resume_state;
6153 + unsigned int wake_address;
6155 + struct timer_list suspend_timer;
6156 + int suspend_timer_timeout;
6157 + int suspend_timer_running;
6159 + /* Global use count for videocore.
6160 + ** This is equal to the sum of the use counts for all services. When
6161 + ** this hits zero the videocore suspend procedure will be initiated.
6163 + int videocore_use_count;
6165 + /* Use count to track requests from videocore peer.
6166 + ** This use count is not associated with a service, so needs to be
6167 + ** tracked separately with the state.
6169 + int peer_use_count;
6171 + /* Flag to indicate whether resume is blocked. This happens when the
6172 + ** ARM is suspending
6174 + struct completion resume_blocker;
6175 + int resume_blocked;
6176 + struct completion blocked_blocker;
6177 + int blocked_count;
6179 + int autosuspend_override;
6181 + /* Flag to indicate that the first vchiq connect has made it through.
6182 + ** This means that both sides should be fully ready, and we should
6183 + ** be able to suspend after this point.
6185 + int first_connect;
6187 + unsigned long long suspend_start_time;
6188 + unsigned long long sleep_start_time;
6189 + unsigned long long resume_start_time;
6190 + unsigned long long last_wake_time;
6192 +} VCHIQ_ARM_STATE_T;
6194 +extern int vchiq_arm_log_level;
6195 +extern int vchiq_susp_log_level;
6198 +vchiq_platform_init(VCHIQ_STATE_T *state);
6201 +vchiq_platform_exit(VCHIQ_STATE_T *state);
6203 +extern VCHIQ_STATE_T *
6204 +vchiq_get_state(void);
6206 +extern VCHIQ_STATUS_T
6207 +vchiq_arm_vcsuspend(VCHIQ_STATE_T *state);
6209 +extern VCHIQ_STATUS_T
6210 +vchiq_arm_force_suspend(VCHIQ_STATE_T *state);
6213 +vchiq_arm_allow_resume(VCHIQ_STATE_T *state);
6215 +extern VCHIQ_STATUS_T
6216 +vchiq_arm_vcresume(VCHIQ_STATE_T *state);
6218 +extern VCHIQ_STATUS_T
6219 +vchiq_arm_init_state(VCHIQ_STATE_T *state, VCHIQ_ARM_STATE_T *arm_state);
6222 +vchiq_check_resume(VCHIQ_STATE_T *state);
6225 +vchiq_check_suspend(VCHIQ_STATE_T *state);
6227 +extern VCHIQ_STATUS_T
6228 +vchiq_use_service(VCHIQ_SERVICE_HANDLE_T handle);
6230 +extern VCHIQ_STATUS_T
6231 +vchiq_release_service(VCHIQ_SERVICE_HANDLE_T handle);
6233 +extern VCHIQ_STATUS_T
6234 +vchiq_check_service(VCHIQ_SERVICE_T *service);
6236 +extern VCHIQ_STATUS_T
6237 +vchiq_platform_suspend(VCHIQ_STATE_T *state);
6240 +vchiq_platform_videocore_wanted(VCHIQ_STATE_T *state);
6243 +vchiq_platform_use_suspend_timer(void);
6246 +vchiq_dump_platform_use_state(VCHIQ_STATE_T *state);
6249 +vchiq_dump_service_use_state(VCHIQ_STATE_T *state);
6251 +extern VCHIQ_ARM_STATE_T*
6252 +vchiq_platform_get_arm_state(VCHIQ_STATE_T *state);
6255 +vchiq_videocore_wanted(VCHIQ_STATE_T *state);
6257 +extern VCHIQ_STATUS_T
6258 +vchiq_use_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
6259 + enum USE_TYPE_E use_type);
6260 +extern VCHIQ_STATUS_T
6261 +vchiq_release_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service);
6264 +set_suspend_state(VCHIQ_ARM_STATE_T *arm_state,
6265 + enum vc_suspend_status new_state);
6268 +set_resume_state(VCHIQ_ARM_STATE_T *arm_state,
6269 + enum vc_resume_status new_state);
6272 +start_suspend_timer(VCHIQ_ARM_STATE_T *arm_state);
6274 +extern int vchiq_proc_init(void);
6275 +extern void vchiq_proc_deinit(void);
6276 +extern struct proc_dir_entry *vchiq_proc_top(void);
6277 +extern struct proc_dir_entry *vchiq_clients_top(void);
6280 +#endif /* VCHIQ_ARM_H */
6281 diff -urN linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_build_info.h linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_build_info.h
6282 --- linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_build_info.h 1970-01-01 01:00:00.000000000 +0100
6283 +++ linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_build_info.h 2013-07-06 15:25:50.000000000 +0100
6286 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
6288 + * Redistribution and use in source and binary forms, with or without
6289 + * modification, are permitted provided that the following conditions
6291 + * 1. Redistributions of source code must retain the above copyright
6292 + * notice, this list of conditions, and the following disclaimer,
6293 + * without modification.
6294 + * 2. Redistributions in binary form must reproduce the above copyright
6295 + * notice, this list of conditions and the following disclaimer in the
6296 + * documentation and/or other materials provided with the distribution.
6297 + * 3. The names of the above-listed copyright holders may not be used
6298 + * to endorse or promote products derived from this software without
6299 + * specific prior written permission.
6301 + * ALTERNATIVELY, this software may be distributed under the terms of the
6302 + * GNU General Public License ("GPL") version 2, as published by the Free
6303 + * Software Foundation.
6305 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
6306 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
6307 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
6308 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
6309 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
6310 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
6311 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
6312 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
6313 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
6314 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
6315 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
6318 +const char *vchiq_get_build_hostname(void);
6319 +const char *vchiq_get_build_version(void);
6320 +const char *vchiq_get_build_time(void);
6321 +const char *vchiq_get_build_date(void);
6322 diff -urN linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_cfg.h linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_cfg.h
6323 --- linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_cfg.h 1970-01-01 01:00:00.000000000 +0100
6324 +++ linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_cfg.h 2013-07-06 15:25:50.000000000 +0100
6327 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
6329 + * Redistribution and use in source and binary forms, with or without
6330 + * modification, are permitted provided that the following conditions
6332 + * 1. Redistributions of source code must retain the above copyright
6333 + * notice, this list of conditions, and the following disclaimer,
6334 + * without modification.
6335 + * 2. Redistributions in binary form must reproduce the above copyright
6336 + * notice, this list of conditions and the following disclaimer in the
6337 + * documentation and/or other materials provided with the distribution.
6338 + * 3. The names of the above-listed copyright holders may not be used
6339 + * to endorse or promote products derived from this software without
6340 + * specific prior written permission.
6342 + * ALTERNATIVELY, this software may be distributed under the terms of the
6343 + * GNU General Public License ("GPL") version 2, as published by the Free
6344 + * Software Foundation.
6346 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
6347 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
6348 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
6349 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
6350 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
6351 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
6352 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
6353 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
6354 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
6355 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
6356 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
6359 +#ifndef VCHIQ_CFG_H
6360 +#define VCHIQ_CFG_H
6362 +#define VCHIQ_MAGIC VCHIQ_MAKE_FOURCC('V', 'C', 'H', 'I')
6363 +/* The version of VCHIQ - change with any non-trivial change */
6364 +#define VCHIQ_VERSION 6
6365 +/* The minimum compatible version - update to match VCHIQ_VERSION with any
6366 +** incompatible change */
6367 +#define VCHIQ_VERSION_MIN 3
6369 +#define VCHIQ_MAX_STATES 1
6370 +#define VCHIQ_MAX_SERVICES 4096
6371 +#define VCHIQ_MAX_SLOTS 128
6372 +#define VCHIQ_MAX_SLOTS_PER_SIDE 64
6374 +#define VCHIQ_NUM_CURRENT_BULKS 32
6375 +#define VCHIQ_NUM_SERVICE_BULKS 4
6377 +#ifndef VCHIQ_ENABLE_DEBUG
6378 +#define VCHIQ_ENABLE_DEBUG 1
6381 +#ifndef VCHIQ_ENABLE_STATS
6382 +#define VCHIQ_ENABLE_STATS 1
6385 +#endif /* VCHIQ_CFG_H */
6386 diff -urN linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.c linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.c
6387 --- linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.c 1970-01-01 01:00:00.000000000 +0100
6388 +++ linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.c 2013-07-06 15:25:50.000000000 +0100
6391 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
6393 + * Redistribution and use in source and binary forms, with or without
6394 + * modification, are permitted provided that the following conditions
6396 + * 1. Redistributions of source code must retain the above copyright
6397 + * notice, this list of conditions, and the following disclaimer,
6398 + * without modification.
6399 + * 2. Redistributions in binary form must reproduce the above copyright
6400 + * notice, this list of conditions and the following disclaimer in the
6401 + * documentation and/or other materials provided with the distribution.
6402 + * 3. The names of the above-listed copyright holders may not be used
6403 + * to endorse or promote products derived from this software without
6404 + * specific prior written permission.
6406 + * ALTERNATIVELY, this software may be distributed under the terms of the
6407 + * GNU General Public License ("GPL") version 2, as published by the Free
6408 + * Software Foundation.
6410 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
6411 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
6412 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
6413 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
6414 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
6415 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
6416 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
6417 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
6418 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
6419 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
6420 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
6423 +#include "vchiq_connected.h"
6424 +#include "vchiq_core.h"
6425 +#include <linux/module.h>
6426 +#include <linux/mutex.h>
6428 +#define MAX_CALLBACKS 10
6430 +static int g_connected;
6431 +static int g_num_deferred_callbacks;
6432 +static VCHIQ_CONNECTED_CALLBACK_T g_deferred_callback[MAX_CALLBACKS];
6433 +static int g_once_init;
6434 +static struct mutex g_connected_mutex;
6436 +/****************************************************************************
6438 +* Function to initialize our lock.
6440 +***************************************************************************/
6442 +static void connected_init(void)
6444 + if (!g_once_init) {
6445 + mutex_init(&g_connected_mutex);
6450 +/****************************************************************************
6452 +* This function is used to defer initialization until the vchiq stack is
6453 +* initialized. If the stack is already initialized, then the callback will
6454 +* be made immediately, otherwise it will be deferred until
6455 +* vchiq_call_connected_callbacks is called.
6457 +***************************************************************************/
6459 +void vchiq_add_connected_callback(VCHIQ_CONNECTED_CALLBACK_T callback)
6463 + if (mutex_lock_interruptible(&g_connected_mutex) != 0)
6467 + /* We're already connected. Call the callback immediately. */
6471 + if (g_num_deferred_callbacks >= MAX_CALLBACKS)
6472 + vchiq_log_error(vchiq_core_log_level,
6473 +			"There are already %d callbacks registered - "
6474 + "please increase MAX_CALLBACKS",
6475 + g_num_deferred_callbacks);
6477 + g_deferred_callback[g_num_deferred_callbacks] =
6479 + g_num_deferred_callbacks++;
6482 + mutex_unlock(&g_connected_mutex);
6485 +/****************************************************************************
6487 +* This function is called by the vchiq stack once it has been connected to
6488 +* the videocore and clients can start to use the stack.
6490 +***************************************************************************/
6492 +void vchiq_call_connected_callbacks(void)
6498 + if (mutex_lock_interruptible(&g_connected_mutex) != 0)
6501 + for (i = 0; i < g_num_deferred_callbacks; i++)
6502 + g_deferred_callback[i]();
6504 + g_num_deferred_callbacks = 0;
6506 + mutex_unlock(&g_connected_mutex);
6508 +EXPORT_SYMBOL(vchiq_add_connected_callback);
6509 diff -urN linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.h linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.h
6510 --- linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.h 1970-01-01 01:00:00.000000000 +0100
6511 +++ linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.h 2013-07-06 15:25:50.000000000 +0100
6514 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
6516 + * Redistribution and use in source and binary forms, with or without
6517 + * modification, are permitted provided that the following conditions
6519 + * 1. Redistributions of source code must retain the above copyright
6520 + * notice, this list of conditions, and the following disclaimer,
6521 + * without modification.
6522 + * 2. Redistributions in binary form must reproduce the above copyright
6523 + * notice, this list of conditions and the following disclaimer in the
6524 + * documentation and/or other materials provided with the distribution.
6525 + * 3. The names of the above-listed copyright holders may not be used
6526 + * to endorse or promote products derived from this software without
6527 + * specific prior written permission.
6529 + * ALTERNATIVELY, this software may be distributed under the terms of the
6530 + * GNU General Public License ("GPL") version 2, as published by the Free
6531 + * Software Foundation.
6533 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
6534 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
6535 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
6536 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
6537 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
6538 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
6539 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
6540 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
6541 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
6542 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
6543 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
6546 +#ifndef VCHIQ_CONNECTED_H
6547 +#define VCHIQ_CONNECTED_H
6549 +/* ---- Include Files ----------------------------------------------------- */
6551 +/* ---- Constants and Types ---------------------------------------------- */
6553 +typedef void (*VCHIQ_CONNECTED_CALLBACK_T)(void);
6555 +/* ---- Variable Externs ------------------------------------------------- */
6557 +/* ---- Function Prototypes ---------------------------------------------- */
6559 +void vchiq_add_connected_callback(VCHIQ_CONNECTED_CALLBACK_T callback);
6560 +void vchiq_call_connected_callbacks(void);
6562 +#endif /* VCHIQ_CONNECTED_H */
6564 diff -urN linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.c linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.c
6565 --- linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.c 1970-01-01 01:00:00.000000000 +0100
6566 +++ linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.c 2013-07-06 15:25:50.000000000 +0100
6569 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
6571 + * Redistribution and use in source and binary forms, with or without
6572 + * modification, are permitted provided that the following conditions
6574 + * 1. Redistributions of source code must retain the above copyright
6575 + * notice, this list of conditions, and the following disclaimer,
6576 + * without modification.
6577 + * 2. Redistributions in binary form must reproduce the above copyright
6578 + * notice, this list of conditions and the following disclaimer in the
6579 + * documentation and/or other materials provided with the distribution.
6580 + * 3. The names of the above-listed copyright holders may not be used
6581 + * to endorse or promote products derived from this software without
6582 + * specific prior written permission.
6584 + * ALTERNATIVELY, this software may be distributed under the terms of the
6585 + * GNU General Public License ("GPL") version 2, as published by the Free
6586 + * Software Foundation.
6588 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
6589 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
6590 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
6591 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
6592 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
6593 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
6594 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
6595 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
6596 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
6597 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
6598 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
6601 +#include "vchiq_core.h"
6603 +#define VCHIQ_SLOT_HANDLER_STACK 8192
6605 +#define HANDLE_STATE_SHIFT 12
6607 +#define SLOT_INFO_FROM_INDEX(state, index) (state->slot_info + (index))
6608 +#define SLOT_DATA_FROM_INDEX(state, index) (state->slot_data + (index))
6609 +#define SLOT_INDEX_FROM_DATA(state, data) \
6610 + (((unsigned int)((char *)data - (char *)state->slot_data)) / \
6612 +#define SLOT_INDEX_FROM_INFO(state, info) \
6613 + ((unsigned int)(info - state->slot_info))
6614 +#define SLOT_QUEUE_INDEX_FROM_POS(pos) \
6615 + ((int)((unsigned int)(pos) / VCHIQ_SLOT_SIZE))
6618 +#define BULK_INDEX(x) (x & (VCHIQ_NUM_SERVICE_BULKS - 1))
6621 +struct vchiq_open_payload {
6625 + short version_min;
6628 +struct vchiq_openack_payload {
6632 +/* we require this for consistency between endpoints */
6633 +vchiq_static_assert(sizeof(VCHIQ_HEADER_T) == 8);
6634 +vchiq_static_assert(IS_POW2(sizeof(VCHIQ_HEADER_T)));
6635 +vchiq_static_assert(IS_POW2(VCHIQ_NUM_CURRENT_BULKS));
6636 +vchiq_static_assert(IS_POW2(VCHIQ_NUM_SERVICE_BULKS));
6637 +vchiq_static_assert(IS_POW2(VCHIQ_MAX_SERVICES));
6638 +vchiq_static_assert(VCHIQ_VERSION >= VCHIQ_VERSION_MIN);
6640 +/* Run time control of log level, based on KERN_XXX level. */
6641 +int vchiq_core_log_level = VCHIQ_LOG_DEFAULT;
6642 +int vchiq_core_msg_log_level = VCHIQ_LOG_DEFAULT;
6643 +int vchiq_sync_log_level = VCHIQ_LOG_DEFAULT;
6645 +static atomic_t pause_bulks_count = ATOMIC_INIT(0);
6647 +static DEFINE_SPINLOCK(service_spinlock);
6648 +DEFINE_SPINLOCK(bulk_waiter_spinlock);
6649 +DEFINE_SPINLOCK(quota_spinlock);
6651 +VCHIQ_STATE_T *vchiq_states[VCHIQ_MAX_STATES];
6652 +static unsigned int handle_seq;
6654 +static const char *const srvstate_names[] = {
6667 +static const char *const reason_names[] = {
6670 + "MESSAGE_AVAILABLE",
6671 + "BULK_TRANSMIT_DONE",
6672 + "BULK_RECEIVE_DONE",
6673 + "BULK_TRANSMIT_ABORTED",
6674 + "BULK_RECEIVE_ABORTED"
6677 +static const char *const conn_state_names[] = {
6691 +release_message_sync(VCHIQ_STATE_T *state, VCHIQ_HEADER_T *header);
6693 +static const char *msg_type_str(unsigned int msg_type)
6695 + switch (msg_type) {
6696 + case VCHIQ_MSG_PADDING: return "PADDING";
6697 + case VCHIQ_MSG_CONNECT: return "CONNECT";
6698 + case VCHIQ_MSG_OPEN: return "OPEN";
6699 + case VCHIQ_MSG_OPENACK: return "OPENACK";
6700 + case VCHIQ_MSG_CLOSE: return "CLOSE";
6701 + case VCHIQ_MSG_DATA: return "DATA";
6702 + case VCHIQ_MSG_BULK_RX: return "BULK_RX";
6703 + case VCHIQ_MSG_BULK_TX: return "BULK_TX";
6704 + case VCHIQ_MSG_BULK_RX_DONE: return "BULK_RX_DONE";
6705 + case VCHIQ_MSG_BULK_TX_DONE: return "BULK_TX_DONE";
6706 + case VCHIQ_MSG_PAUSE: return "PAUSE";
6707 + case VCHIQ_MSG_RESUME: return "RESUME";
6708 + case VCHIQ_MSG_REMOTE_USE: return "REMOTE_USE";
6709 + case VCHIQ_MSG_REMOTE_RELEASE: return "REMOTE_RELEASE";
6710 + case VCHIQ_MSG_REMOTE_USE_ACTIVE: return "REMOTE_USE_ACTIVE";
6716 +vchiq_set_service_state(VCHIQ_SERVICE_T *service, int newstate)
6718 + vchiq_log_info(vchiq_core_log_level, "%d: srv:%d %s->%s",
6719 + service->state->id, service->localport,
6720 + srvstate_names[service->srvstate],
6721 + srvstate_names[newstate]);
6722 + service->srvstate = newstate;
6726 +find_service_by_handle(VCHIQ_SERVICE_HANDLE_T handle)
6728 + VCHIQ_SERVICE_T *service;
6730 + spin_lock(&service_spinlock);
6731 + service = handle_to_service(handle);
6732 + if (service && (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
6733 + (service->handle == handle)) {
6734 + BUG_ON(service->ref_count == 0);
6735 + service->ref_count++;
6738 + spin_unlock(&service_spinlock);
6741 + vchiq_log_info(vchiq_core_log_level,
6742 + "Invalid service handle 0x%x", handle);
6748 +find_service_by_port(VCHIQ_STATE_T *state, int localport)
6750 + VCHIQ_SERVICE_T *service = NULL;
6751 + if ((unsigned int)localport <= VCHIQ_PORT_MAX) {
6752 + spin_lock(&service_spinlock);
6753 + service = state->services[localport];
6754 + if (service && (service->srvstate != VCHIQ_SRVSTATE_FREE)) {
6755 + BUG_ON(service->ref_count == 0);
6756 + service->ref_count++;
6759 + spin_unlock(&service_spinlock);
6763 + vchiq_log_info(vchiq_core_log_level,
6764 + "Invalid port %d", localport);
6770 +find_service_for_instance(VCHIQ_INSTANCE_T instance,
6771 + VCHIQ_SERVICE_HANDLE_T handle) {
6772 + VCHIQ_SERVICE_T *service;
6774 + spin_lock(&service_spinlock);
6775 + service = handle_to_service(handle);
6776 + if (service && (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
6777 + (service->handle == handle) &&
6778 + (service->instance == instance)) {
6779 + BUG_ON(service->ref_count == 0);
6780 + service->ref_count++;
6783 + spin_unlock(&service_spinlock);
6786 + vchiq_log_info(vchiq_core_log_level,
6787 + "Invalid service handle 0x%x", handle);
6793 +next_service_by_instance(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance,
6796 + VCHIQ_SERVICE_T *service = NULL;
6799 + spin_lock(&service_spinlock);
6800 + while (idx < state->unused_service) {
6801 + VCHIQ_SERVICE_T *srv = state->services[idx++];
6802 + if (srv && (srv->srvstate != VCHIQ_SRVSTATE_FREE) &&
6803 + (srv->instance == instance)) {
6805 + BUG_ON(service->ref_count == 0);
6806 + service->ref_count++;
6810 + spin_unlock(&service_spinlock);
6818 +lock_service(VCHIQ_SERVICE_T *service)
6820 + spin_lock(&service_spinlock);
6821 + BUG_ON(!service || (service->ref_count == 0));
6823 + service->ref_count++;
6824 + spin_unlock(&service_spinlock);
6828 +unlock_service(VCHIQ_SERVICE_T *service)
6830 + VCHIQ_STATE_T *state = service->state;
6831 + spin_lock(&service_spinlock);
6832 + BUG_ON(!service || (service->ref_count == 0));
6833 + if (service && service->ref_count) {
6834 + service->ref_count--;
6835 + if (!service->ref_count) {
6836 + BUG_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);
6837 + state->services[service->localport] = NULL;
6841 + spin_unlock(&service_spinlock);
6847 +vchiq_get_client_id(VCHIQ_SERVICE_HANDLE_T handle)
6849 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
6852 + id = service ? service->client_id : 0;
6854 + unlock_service(service);
6860 +vchiq_get_service_userdata(VCHIQ_SERVICE_HANDLE_T handle)
6862 + VCHIQ_SERVICE_T *service = handle_to_service(handle);
6864 + return service ? service->base.userdata : NULL;
6868 +vchiq_get_service_fourcc(VCHIQ_SERVICE_HANDLE_T handle)
6870 + VCHIQ_SERVICE_T *service = handle_to_service(handle);
6872 + return service ? service->base.fourcc : 0;
6876 +mark_service_closing_internal(VCHIQ_SERVICE_T *service, int sh_thread)
6878 + VCHIQ_STATE_T *state = service->state;
6879 + VCHIQ_SERVICE_QUOTA_T *service_quota;
6881 + service->closing = 1;
6883 + /* Synchronise with other threads. */
6884 + mutex_lock(&state->recycle_mutex);
6885 + mutex_unlock(&state->recycle_mutex);
6886 + if (!sh_thread || (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT)) {
6887 + /* If we're pausing then the slot_mutex is held until resume
6888 + * by the slot handler. Therefore don't try to acquire this
6889 + * mutex if we're the slot handler and in the pause sent state.
6890 + * We don't need to in this case anyway. */
6891 + mutex_lock(&state->slot_mutex);
6892 + mutex_unlock(&state->slot_mutex);
6895 + /* Unblock any sending thread. */
6896 + service_quota = &state->service_quotas[service->localport];
6897 + up(&service_quota->quota_event);
6901 +mark_service_closing(VCHIQ_SERVICE_T *service)
6903 + mark_service_closing_internal(service, 0);
6906 +static inline VCHIQ_STATUS_T
6907 +make_service_callback(VCHIQ_SERVICE_T *service, VCHIQ_REASON_T reason,
6908 + VCHIQ_HEADER_T *header, void *bulk_userdata)
6910 + VCHIQ_STATUS_T status;
6911 + vchiq_log_trace(vchiq_core_log_level, "%d: callback:%d (%s, %x, %x)",
6912 + service->state->id, service->localport, reason_names[reason],
6913 + (unsigned int)header, (unsigned int)bulk_userdata);
6914 + status = service->base.callback(reason, header, service->handle,
6916 + if (status == VCHIQ_ERROR) {
6917 + vchiq_log_warning(vchiq_core_log_level,
6918 + "%d: ignoring ERROR from callback to service %x",
6919 + service->state->id, service->handle);
6920 + status = VCHIQ_SUCCESS;
6926 +vchiq_set_conn_state(VCHIQ_STATE_T *state, VCHIQ_CONNSTATE_T newstate)
6928 + VCHIQ_CONNSTATE_T oldstate = state->conn_state;
6929 + vchiq_log_info(vchiq_core_log_level, "%d: %s->%s", state->id,
6930 + conn_state_names[oldstate],
6931 + conn_state_names[newstate]);
6932 + state->conn_state = newstate;
6933 + vchiq_platform_conn_state_changed(state, oldstate, newstate);
6937 +remote_event_create(REMOTE_EVENT_T *event)
6940 + /* Don't clear the 'fired' flag because it may already have been set
6941 + ** by the other side. */
6942 + sema_init(event->event, 0);
6946 +remote_event_destroy(REMOTE_EVENT_T *event)
6952 +remote_event_wait(REMOTE_EVENT_T *event)
6954 + if (!event->fired) {
6957 + if (!event->fired) {
6958 + if (down_interruptible(event->event) != 0) {
6972 +remote_event_signal_local(REMOTE_EVENT_T *event)
6979 +remote_event_poll(REMOTE_EVENT_T *event)
6981 + if (event->fired && event->armed)
6982 + remote_event_signal_local(event);
6986 +remote_event_pollall(VCHIQ_STATE_T *state)
6988 + remote_event_poll(&state->local->sync_trigger);
6989 + remote_event_poll(&state->local->sync_release);
6990 + remote_event_poll(&state->local->trigger);
6991 + remote_event_poll(&state->local->recycle);
6994 +/* Round up message sizes so that any space at the end of a slot is always big
6995 +** enough for a header. This relies on header size being a power of two, which
6996 +** has been verified earlier by a static assertion. */
6998 +static inline unsigned int
6999 +calc_stride(unsigned int size)
7001 + /* Allow room for the header */
7002 + size += sizeof(VCHIQ_HEADER_T);
7005 + return (size + sizeof(VCHIQ_HEADER_T) - 1) & ~(sizeof(VCHIQ_HEADER_T)
7009 +/* Called by the slot handler thread */
7010 +static VCHIQ_SERVICE_T *
7011 +get_listening_service(VCHIQ_STATE_T *state, int fourcc)
7015 + WARN_ON(fourcc == VCHIQ_FOURCC_INVALID);
7017 + for (i = 0; i < state->unused_service; i++) {
7018 + VCHIQ_SERVICE_T *service = state->services[i];
7020 + (service->public_fourcc == fourcc) &&
7021 + ((service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
7022 + ((service->srvstate == VCHIQ_SRVSTATE_OPEN) &&
7023 + (service->remoteport == VCHIQ_PORT_FREE)))) {
7024 + lock_service(service);
7032 +/* Called by the slot handler thread */
7033 +static VCHIQ_SERVICE_T *
7034 +get_connected_service(VCHIQ_STATE_T *state, unsigned int port)
7037 + for (i = 0; i < state->unused_service; i++) {
7038 + VCHIQ_SERVICE_T *service = state->services[i];
7039 + if (service && (service->srvstate == VCHIQ_SRVSTATE_OPEN)
7040 + && (service->remoteport == port)) {
7041 + lock_service(service);
7049 +request_poll(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service, int poll_type)
7055 + value = atomic_read(&service->poll_flags);
7056 + } while (atomic_cmpxchg(&service->poll_flags, value,
7057 + value | (1 << poll_type)) != value);
7060 + value = atomic_read(&state->poll_services[
7061 + service->localport>>5]);
7062 + } while (atomic_cmpxchg(
7063 + &state->poll_services[service->localport>>5],
7064 + value, value | (1 << (service->localport & 0x1f)))
7068 + state->poll_needed = 1;
7071 + /* ... and ensure the slot handler runs. */
7072 + remote_event_signal_local(&state->local->trigger);
7075 +/* Called from queue_message, by the slot handler and application threads,
7076 +** with slot_mutex held */
7077 +static VCHIQ_HEADER_T *
7078 +reserve_space(VCHIQ_STATE_T *state, int space, int is_blocking)
7080 + VCHIQ_SHARED_STATE_T *local = state->local;
7081 + int tx_pos = state->local_tx_pos;
7082 + int slot_space = VCHIQ_SLOT_SIZE - (tx_pos & VCHIQ_SLOT_MASK);
7084 + if (space > slot_space) {
7085 + VCHIQ_HEADER_T *header;
7086 + /* Fill the remaining space with padding */
7087 + WARN_ON(state->tx_data == NULL);
7088 + header = (VCHIQ_HEADER_T *)
7089 + (state->tx_data + (tx_pos & VCHIQ_SLOT_MASK));
7090 + header->msgid = VCHIQ_MSGID_PADDING;
7091 + header->size = slot_space - sizeof(VCHIQ_HEADER_T);
7093 + tx_pos += slot_space;
7096 + /* If necessary, get the next slot. */
7097 + if ((tx_pos & VCHIQ_SLOT_MASK) == 0) {
7100 + /* If there is no free slot... */
7102 + if (down_trylock(&state->slot_available_event) != 0) {
7103 + /* ...wait for one. */
7105 + VCHIQ_STATS_INC(state, slot_stalls);
7107 + /* But first, flush through the last slot. */
7108 + state->local_tx_pos = tx_pos;
7109 + local->tx_pos = tx_pos;
7110 + remote_event_signal(&state->remote->trigger);
7112 + if (!is_blocking ||
7113 + (down_interruptible(
7114 + &state->slot_available_event) != 0))
7115 + return NULL; /* No space available */
7119 + (state->slot_queue_available * VCHIQ_SLOT_SIZE));
7121 + slot_index = local->slot_queue[
7122 + SLOT_QUEUE_INDEX_FROM_POS(tx_pos) &
7123 + VCHIQ_SLOT_QUEUE_MASK];
7125 + (char *)SLOT_DATA_FROM_INDEX(state, slot_index);
7128 + state->local_tx_pos = tx_pos + space;
7130 + return (VCHIQ_HEADER_T *)(state->tx_data + (tx_pos & VCHIQ_SLOT_MASK));
7133 +/* Called by the recycle thread. */
7135 +process_free_queue(VCHIQ_STATE_T *state)
7137 + VCHIQ_SHARED_STATE_T *local = state->local;
7138 + BITSET_T service_found[BITSET_SIZE(VCHIQ_MAX_SERVICES)];
7139 + int slot_queue_available;
7141 + /* Use a read memory barrier to ensure that any state that may have
7142 + ** been modified by another thread is not masked by stale prefetched
7146 + /* Find slots which have been freed by the other side, and return them
7147 + ** to the available queue. */
7148 + slot_queue_available = state->slot_queue_available;
7150 + while (slot_queue_available != local->slot_queue_recycle) {
7152 + int slot_index = local->slot_queue[slot_queue_available++ &
7153 + VCHIQ_SLOT_QUEUE_MASK];
7154 + char *data = (char *)SLOT_DATA_FROM_INDEX(state, slot_index);
7155 + int data_found = 0;
7157 + vchiq_log_trace(vchiq_core_log_level, "%d: pfq %d=%x %x %x",
7158 + state->id, slot_index, (unsigned int)data,
7159 + local->slot_queue_recycle, slot_queue_available);
7161 + /* Initialise the bitmask for services which have used this
7163 + BITSET_ZERO(service_found);
7167 + while (pos < VCHIQ_SLOT_SIZE) {
7168 + VCHIQ_HEADER_T *header =
7169 + (VCHIQ_HEADER_T *)(data + pos);
7170 + int msgid = header->msgid;
7171 + if (VCHIQ_MSG_TYPE(msgid) == VCHIQ_MSG_DATA) {
7172 + int port = VCHIQ_MSG_SRCPORT(msgid);
7173 + VCHIQ_SERVICE_QUOTA_T *service_quota =
7174 + &state->service_quotas[port];
7176 + spin_lock("a_spinlock);
7177 + count = service_quota->message_use_count;
7179 + service_quota->message_use_count =
7181 + spin_unlock("a_spinlock);
7183 + if (count == service_quota->message_quota)
7184 + /* Signal the service that it
7185 + ** has dropped below its quota
7187 + up(&service_quota->quota_event);
7188 + else if (count == 0) {
7189 + vchiq_log_error(vchiq_core_log_level,
7191 + "message_use_count=%d "
7192 + "(header %x, msgid %x, "
7193 + "header->msgid %x, "
7194 + "header->size %x)",
7197 + message_use_count,
7198 + (unsigned int)header, msgid,
7201 + WARN(1, "invalid message use count\n");
7203 + if (!BITSET_IS_SET(service_found, port)) {
7204 + /* Set the found bit for this service */
7205 + BITSET_SET(service_found, port);
7207 + spin_lock("a_spinlock);
7208 + count = service_quota->slot_use_count;
7210 + service_quota->slot_use_count =
7212 + spin_unlock("a_spinlock);
7215 + /* Signal the service in case
7216 + ** it has dropped below its
7218 + up(&service_quota->quota_event);
7220 + vchiq_core_log_level,
7221 + "%d: pfq:%d %x@%x - "
7225 + (unsigned int)header,
7229 + vchiq_core_log_level,
7238 + (unsigned int)header,
7242 + WARN(1, "bad slot use count\n");
7249 + pos += calc_stride(header->size);
7250 + if (pos > VCHIQ_SLOT_SIZE) {
7251 + vchiq_log_error(vchiq_core_log_level,
7252 + "pfq - pos %x: header %x, msgid %x, "
7253 + "header->msgid %x, header->size %x",
7254 + pos, (unsigned int)header, msgid,
7255 + header->msgid, header->size);
7256 + WARN(1, "invalid slot position\n");
7262 + spin_lock("a_spinlock);
7263 + count = state->data_use_count;
7265 + state->data_use_count =
7267 + spin_unlock("a_spinlock);
7268 + if (count == state->data_quota)
7269 + up(&state->data_quota_event);
7272 + state->slot_queue_available = slot_queue_available;
7273 + up(&state->slot_available_event);
7277 +/* Called by the slot handler and application threads */
7278 +static VCHIQ_STATUS_T
7279 +queue_message(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
7280 + int msgid, const VCHIQ_ELEMENT_T *elements,
7281 + int count, int size, int is_blocking)
7283 + VCHIQ_SHARED_STATE_T *local;
7284 + VCHIQ_SERVICE_QUOTA_T *service_quota = NULL;
7285 + VCHIQ_HEADER_T *header;
7286 + int type = VCHIQ_MSG_TYPE(msgid);
7288 + unsigned int stride;
7290 + local = state->local;
7292 + stride = calc_stride(size);
7294 + WARN_ON(!(stride <= VCHIQ_SLOT_SIZE));
7296 + if ((type != VCHIQ_MSG_RESUME) &&
7297 + (mutex_lock_interruptible(&state->slot_mutex) != 0))
7298 + return VCHIQ_RETRY;
7300 + if (type == VCHIQ_MSG_DATA) {
7305 + if (service->closing) {
7306 + /* The service has been closed */
7307 + mutex_unlock(&state->slot_mutex);
7308 + return VCHIQ_ERROR;
7311 + service_quota = &state->service_quotas[service->localport];
7313 + spin_lock("a_spinlock);
7315 + /* Ensure this service doesn't use more than its quota of
7316 + ** messages or slots */
7317 + tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
7318 + state->local_tx_pos + stride - 1);
7320 + /* Ensure data messages don't use more than their quota of
7322 + while ((tx_end_index != state->previous_data_index) &&
7323 + (state->data_use_count == state->data_quota)) {
7324 + VCHIQ_STATS_INC(state, data_stalls);
7325 + spin_unlock(&quota_spinlock);
7326 + mutex_unlock(&state->slot_mutex);
7328 + if (down_interruptible(&state->data_quota_event)
7330 + return VCHIQ_RETRY;
7332 + mutex_lock(&state->slot_mutex);
7333 + spin_lock(&quota_spinlock);
7334 + tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
7335 + state->local_tx_pos + stride - 1);
7336 + if ((tx_end_index == state->previous_data_index) ||
7337 + (state->data_use_count < state->data_quota)) {
7338 + /* Pass the signal on to other waiters */
7339 + up(&state->data_quota_event);
7344 + while ((service_quota->message_use_count ==
7345 + service_quota->message_quota) ||
7346 + ((tx_end_index != service_quota->previous_tx_index) &&
7347 + (service_quota->slot_use_count ==
7348 + service_quota->slot_quota))) {
7349 + spin_unlock(&quota_spinlock);
7350 + vchiq_log_trace(vchiq_core_log_level,
7351 + "%d: qm:%d %s,%x - quota stall "
7352 + "(msg %d, slot %d)",
7353 + state->id, service->localport,
7354 + msg_type_str(type), size,
7355 + service_quota->message_use_count,
7356 + service_quota->slot_use_count);
7357 + VCHIQ_SERVICE_STATS_INC(service, quota_stalls);
7358 + mutex_unlock(&state->slot_mutex);
7359 + if (down_interruptible(&service_quota->quota_event)
7361 + return VCHIQ_RETRY;
7362 + if (service->closing)
7363 + return VCHIQ_ERROR;
7364 + if (mutex_lock_interruptible(&state->slot_mutex) != 0)
7365 + return VCHIQ_RETRY;
7366 + if (service->srvstate != VCHIQ_SRVSTATE_OPEN) {
7367 + /* The service has been closed */
7368 + mutex_unlock(&state->slot_mutex);
7369 + return VCHIQ_ERROR;
7371 + spin_lock(&quota_spinlock);
7372 + tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
7373 + state->local_tx_pos + stride - 1);
7376 + spin_unlock(&quota_spinlock);
7379 + header = reserve_space(state, stride, is_blocking);
7383 + VCHIQ_SERVICE_STATS_INC(service, slot_stalls);
7384 + mutex_unlock(&state->slot_mutex);
7385 + return VCHIQ_RETRY;
7388 + if (type == VCHIQ_MSG_DATA) {
7391 + int slot_use_count;
7393 + vchiq_log_info(vchiq_core_log_level,
7394 + "%d: qm %s@%x,%x (%d->%d)",
7396 + msg_type_str(VCHIQ_MSG_TYPE(msgid)),
7397 + (unsigned int)header, size,
7398 + VCHIQ_MSG_SRCPORT(msgid),
7399 + VCHIQ_MSG_DSTPORT(msgid));
7403 + for (i = 0, pos = 0; i < (unsigned int)count;
7404 + pos += elements[i++].size)
7405 + if (elements[i].size) {
7406 + if (vchiq_copy_from_user
7407 + (header->data + pos, elements[i].data,
7408 + (size_t) elements[i].size) !=
7410 + mutex_unlock(&state->slot_mutex);
7411 + VCHIQ_SERVICE_STATS_INC(service,
7413 + return VCHIQ_ERROR;
7416 + if (vchiq_core_msg_log_level >=
7418 + vchiq_log_dump_mem("Sent", 0,
7419 + header->data + pos,
7421 + elements[0].size));
7425 + spin_lock(&quota_spinlock);
7426 + service_quota->message_use_count++;
7429 + SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos - 1);
7431 + /* If this transmission can't fit in the last slot used by any
7432 + ** service, the data_use_count must be increased. */
7433 + if (tx_end_index != state->previous_data_index) {
7434 + state->previous_data_index = tx_end_index;
7435 + state->data_use_count++;
7438 + /* If this isn't the same slot last used by this service,
7439 + ** the service's slot_use_count must be increased. */
7440 + if (tx_end_index != service_quota->previous_tx_index) {
7441 + service_quota->previous_tx_index = tx_end_index;
7442 + slot_use_count = ++service_quota->slot_use_count;
7444 + slot_use_count = 0;
7447 + spin_unlock(&quota_spinlock);
7449 + if (slot_use_count)
7450 + vchiq_log_trace(vchiq_core_log_level,
7451 + "%d: qm:%d %s,%x - slot_use->%d (hdr %p)",
7452 + state->id, service->localport,
7453 + msg_type_str(VCHIQ_MSG_TYPE(msgid)), size,
7454 + slot_use_count, header);
7456 + VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
7457 + VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
7459 + vchiq_log_info(vchiq_core_log_level,
7460 + "%d: qm %s@%x,%x (%d->%d)", state->id,
7461 + msg_type_str(VCHIQ_MSG_TYPE(msgid)),
7462 + (unsigned int)header, size,
7463 + VCHIQ_MSG_SRCPORT(msgid),
7464 + VCHIQ_MSG_DSTPORT(msgid));
7466 + WARN_ON(!((count == 1) && (size == elements[0].size)));
7467 + memcpy(header->data, elements[0].data,
7468 + elements[0].size);
7470 + VCHIQ_STATS_INC(state, ctrl_tx_count);
7473 + header->msgid = msgid;
7474 + header->size = size;
7479 + svc_fourcc = service
7480 + ? service->base.fourcc
7481 + : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
7483 + vchiq_log_info(vchiq_core_msg_log_level,
7484 + "Sent Msg %s(%u) to %c%c%c%c s:%u d:%d len:%d",
7485 + msg_type_str(VCHIQ_MSG_TYPE(msgid)),
7486 + VCHIQ_MSG_TYPE(msgid),
7487 + VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
7488 + VCHIQ_MSG_SRCPORT(msgid),
7489 + VCHIQ_MSG_DSTPORT(msgid),
7493 + /* Make sure the new header is visible to the peer. */
7496 + /* Make the new tx_pos visible to the peer. */
7497 + local->tx_pos = state->local_tx_pos;
7500 + if (service && (type == VCHIQ_MSG_CLOSE))
7501 + vchiq_set_service_state(service, VCHIQ_SRVSTATE_CLOSESENT);
7503 + if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_PAUSE)
7504 + mutex_unlock(&state->slot_mutex);
7506 + remote_event_signal(&state->remote->trigger);
7508 + return VCHIQ_SUCCESS;
7511 +/* Called by the slot handler and application threads */
7512 +static VCHIQ_STATUS_T
7513 +queue_message_sync(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
7514 + int msgid, const VCHIQ_ELEMENT_T *elements,
7515 + int count, int size, int is_blocking)
7517 + VCHIQ_SHARED_STATE_T *local;
7518 + VCHIQ_HEADER_T *header;
7520 + local = state->local;
7522 + if ((VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_RESUME) &&
7523 + (mutex_lock_interruptible(&state->sync_mutex) != 0))
7524 + return VCHIQ_RETRY;
7526 + remote_event_wait(&local->sync_release);
7530 + header = (VCHIQ_HEADER_T *)SLOT_DATA_FROM_INDEX(state,
7531 + local->slot_sync);
7534 + int oldmsgid = header->msgid;
7535 + if (oldmsgid != VCHIQ_MSGID_PADDING)
7536 + vchiq_log_error(vchiq_core_log_level,
7537 + "%d: qms - msgid %x, not PADDING",
7538 + state->id, oldmsgid);
7544 + vchiq_log_info(vchiq_sync_log_level,
7545 + "%d: qms %s@%x,%x (%d->%d)", state->id,
7546 + msg_type_str(VCHIQ_MSG_TYPE(msgid)),
7547 + (unsigned int)header, size,
7548 + VCHIQ_MSG_SRCPORT(msgid),
7549 + VCHIQ_MSG_DSTPORT(msgid));
7551 + for (i = 0, pos = 0; i < (unsigned int)count;
7552 + pos += elements[i++].size)
7553 + if (elements[i].size) {
7554 + if (vchiq_copy_from_user
7555 + (header->data + pos, elements[i].data,
7556 + (size_t) elements[i].size) !=
7558 + mutex_unlock(&state->sync_mutex);
7559 + VCHIQ_SERVICE_STATS_INC(service,
7561 + return VCHIQ_ERROR;
7564 + if (vchiq_sync_log_level >=
7566 + vchiq_log_dump_mem("Sent Sync",
7567 + 0, header->data + pos,
7569 + elements[0].size));
7573 + VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
7574 + VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
7576 + vchiq_log_info(vchiq_sync_log_level,
7577 + "%d: qms %s@%x,%x (%d->%d)", state->id,
7578 + msg_type_str(VCHIQ_MSG_TYPE(msgid)),
7579 + (unsigned int)header, size,
7580 + VCHIQ_MSG_SRCPORT(msgid),
7581 + VCHIQ_MSG_DSTPORT(msgid));
7583 + WARN_ON(!((count == 1) && (size == elements[0].size)));
7584 + memcpy(header->data, elements[0].data,
7585 + elements[0].size);
7587 + VCHIQ_STATS_INC(state, ctrl_tx_count);
7590 + header->size = size;
7591 + header->msgid = msgid;
7593 + if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) {
7596 + svc_fourcc = service
7597 + ? service->base.fourcc
7598 + : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
7600 + vchiq_log_trace(vchiq_sync_log_level,
7601 + "Sent Sync Msg %s(%u) to %c%c%c%c s:%u d:%d len:%d",
7602 + msg_type_str(VCHIQ_MSG_TYPE(msgid)),
7603 + VCHIQ_MSG_TYPE(msgid),
7604 + VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
7605 + VCHIQ_MSG_SRCPORT(msgid),
7606 + VCHIQ_MSG_DSTPORT(msgid),
7610 + /* Make sure the new header is visible to the peer. */
7613 + remote_event_signal(&state->remote->sync_trigger);
7615 + if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_PAUSE)
7616 + mutex_unlock(&state->sync_mutex);
7618 + return VCHIQ_SUCCESS;
7622 +claim_slot(VCHIQ_SLOT_INFO_T *slot)
7624 + slot->use_count++;
7628 +release_slot(VCHIQ_STATE_T *state, VCHIQ_SLOT_INFO_T *slot_info,
7629 + VCHIQ_HEADER_T *header, VCHIQ_SERVICE_T *service)
7631 + int release_count;
7633 + mutex_lock(&state->recycle_mutex);
7636 + int msgid = header->msgid;
7637 + if (((msgid & VCHIQ_MSGID_CLAIMED) == 0) ||
7638 + (service && service->closing)) {
7639 + mutex_unlock(&state->recycle_mutex);
7643 + /* Rewrite the message header to prevent a double
7645 + header->msgid = msgid & ~VCHIQ_MSGID_CLAIMED;
7648 + release_count = slot_info->release_count;
7649 + slot_info->release_count = ++release_count;
7651 + if (release_count == slot_info->use_count) {
7652 + int slot_queue_recycle;
7653 + /* Add to the freed queue */
7655 + /* A read barrier is necessary here to prevent speculative
7656 + ** fetches of remote->slot_queue_recycle from overtaking the
7660 + slot_queue_recycle = state->remote->slot_queue_recycle;
7661 + state->remote->slot_queue[slot_queue_recycle &
7662 + VCHIQ_SLOT_QUEUE_MASK] =
7663 + SLOT_INDEX_FROM_INFO(state, slot_info);
7664 + state->remote->slot_queue_recycle = slot_queue_recycle + 1;
7665 + vchiq_log_info(vchiq_core_log_level,
7666 + "%d: release_slot %d - recycle->%x",
7667 + state->id, SLOT_INDEX_FROM_INFO(state, slot_info),
7668 + state->remote->slot_queue_recycle);
7670 + /* A write barrier is necessary, but remote_event_signal
7671 + ** contains one. */
7672 + remote_event_signal(&state->remote->recycle);
7675 + mutex_unlock(&state->recycle_mutex);
7678 +/* Called by the slot handler - don't hold the bulk mutex */
7679 +static VCHIQ_STATUS_T
7680 +notify_bulks(VCHIQ_SERVICE_T *service, VCHIQ_BULK_QUEUE_T *queue,
7683 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
7685 + vchiq_log_trace(vchiq_core_log_level,
7686 + "%d: nb:%d %cx - p=%x rn=%x r=%x",
7687 + service->state->id, service->localport,
7688 + (queue == &service->bulk_tx) ? 't' : 'r',
7689 + queue->process, queue->remote_notify, queue->remove);
7691 + if (service->state->is_master) {
7692 + while (queue->remote_notify != queue->process) {
7693 + VCHIQ_BULK_T *bulk =
7694 + &queue->bulks[BULK_INDEX(queue->remote_notify)];
7695 + int msgtype = (bulk->dir == VCHIQ_BULK_TRANSMIT) ?
7696 + VCHIQ_MSG_BULK_RX_DONE : VCHIQ_MSG_BULK_TX_DONE;
7697 + int msgid = VCHIQ_MAKE_MSG(msgtype, service->localport,
7698 + service->remoteport);
7699 + VCHIQ_ELEMENT_T element = { &bulk->actual, 4 };
7700 + /* Only reply to non-dummy bulk requests */
7701 + if (bulk->remote_data) {
7702 + status = queue_message(service->state, NULL,
7703 + msgid, &element, 1, 4, 0);
7704 + if (status != VCHIQ_SUCCESS)
7707 + queue->remote_notify++;
7710 + queue->remote_notify = queue->process;
7713 + if (status == VCHIQ_SUCCESS) {
7714 + while (queue->remove != queue->remote_notify) {
7715 + VCHIQ_BULK_T *bulk =
7716 + &queue->bulks[BULK_INDEX(queue->remove)];
7718 + /* Only generate callbacks for non-dummy bulk
7719 + ** requests, and non-terminated services */
7720 + if (bulk->data && service->instance) {
7721 + if (bulk->actual != VCHIQ_BULK_ACTUAL_ABORTED) {
7722 + if (bulk->dir == VCHIQ_BULK_TRANSMIT) {
7723 + VCHIQ_SERVICE_STATS_INC(service,
7725 + VCHIQ_SERVICE_STATS_ADD(service,
7729 + VCHIQ_SERVICE_STATS_INC(service,
7731 + VCHIQ_SERVICE_STATS_ADD(service,
7736 + VCHIQ_SERVICE_STATS_INC(service,
7737 + bulk_aborted_count);
7739 + if (bulk->mode == VCHIQ_BULK_MODE_BLOCKING) {
7740 + struct bulk_waiter *waiter;
7741 + spin_lock(&bulk_waiter_spinlock);
7742 + waiter = bulk->userdata;
7744 + waiter->actual = bulk->actual;
7745 + up(&waiter->event);
7747 + spin_unlock(&bulk_waiter_spinlock);
7748 + } else if (bulk->mode ==
7749 + VCHIQ_BULK_MODE_CALLBACK) {
7750 + VCHIQ_REASON_T reason = (bulk->dir ==
7751 + VCHIQ_BULK_TRANSMIT) ?
7753 + VCHIQ_BULK_ACTUAL_ABORTED) ?
7754 + VCHIQ_BULK_TRANSMIT_ABORTED :
7755 + VCHIQ_BULK_TRANSMIT_DONE) :
7757 + VCHIQ_BULK_ACTUAL_ABORTED) ?
7758 + VCHIQ_BULK_RECEIVE_ABORTED :
7759 + VCHIQ_BULK_RECEIVE_DONE);
7760 + status = make_service_callback(service,
7761 + reason, NULL, bulk->userdata);
7762 + if (status == VCHIQ_RETRY)
7768 + up(&service->bulk_remove_event);
7771 + status = VCHIQ_SUCCESS;
7774 + if (status == VCHIQ_RETRY)
7775 + request_poll(service->state, service,
7776 + (queue == &service->bulk_tx) ?
7777 + VCHIQ_POLL_TXNOTIFY : VCHIQ_POLL_RXNOTIFY);
7782 +/* Called by the slot handler thread */
7784 +poll_services(VCHIQ_STATE_T *state)
7788 + for (group = 0; group < BITSET_SIZE(state->unused_service); group++) {
7790 + flags = atomic_xchg(&state->poll_services[group], 0);
7791 + for (i = 0; flags; i++) {
7792 + if (flags & (1 << i)) {
7793 + VCHIQ_SERVICE_T *service =
7794 + find_service_by_port(state,
7796 + uint32_t service_flags;
7797 + flags &= ~(1 << i);
7801 + atomic_xchg(&service->poll_flags, 0);
7802 + if (service_flags &
7803 + (1 << VCHIQ_POLL_REMOVE)) {
7804 + vchiq_log_info(vchiq_core_log_level,
7805 + "%d: ps - remove %d<->%d",
7806 + state->id, service->localport,
7807 + service->remoteport);
7809 + /* Make it look like a client, because
7810 + it must be removed and not left in
7811 + the LISTENING state. */
7812 + service->public_fourcc =
7813 + VCHIQ_FOURCC_INVALID;
7815 + if (vchiq_close_service_internal(
7816 + service, 0/*!close_recvd*/) !=
7818 + request_poll(state, service,
7819 + VCHIQ_POLL_REMOVE);
7820 + } else if (service_flags &
7821 + (1 << VCHIQ_POLL_TERMINATE)) {
7822 + vchiq_log_info(vchiq_core_log_level,
7823 + "%d: ps - terminate %d<->%d",
7824 + state->id, service->localport,
7825 + service->remoteport);
7826 + if (vchiq_close_service_internal(
7827 + service, 0/*!close_recvd*/) !=
7829 + request_poll(state, service,
7830 + VCHIQ_POLL_TERMINATE);
7832 + if (service_flags & (1 << VCHIQ_POLL_TXNOTIFY))
7833 + notify_bulks(service,
7834 + &service->bulk_tx,
7836 + if (service_flags & (1 << VCHIQ_POLL_RXNOTIFY))
7837 + notify_bulks(service,
7838 + &service->bulk_rx,
7840 + unlock_service(service);
7846 +/* Called by the slot handler or application threads, holding the bulk mutex. */
7848 +resolve_bulks(VCHIQ_SERVICE_T *service, VCHIQ_BULK_QUEUE_T *queue)
7850 + VCHIQ_STATE_T *state = service->state;
7854 + while ((queue->process != queue->local_insert) &&
7855 + (queue->process != queue->remote_insert)) {
7856 + VCHIQ_BULK_T *bulk = &queue->bulks[BULK_INDEX(queue->process)];
7858 + vchiq_log_trace(vchiq_core_log_level,
7859 + "%d: rb:%d %cx - li=%x ri=%x p=%x",
7860 + state->id, service->localport,
7861 + (queue == &service->bulk_tx) ? 't' : 'r',
7862 + queue->local_insert, queue->remote_insert,
7865 + WARN_ON(!((int)(queue->local_insert - queue->process) > 0));
7866 + WARN_ON(!((int)(queue->remote_insert - queue->process) > 0));
7868 + rc = mutex_lock_interruptible(&state->bulk_transfer_mutex);
7872 + vchiq_transfer_bulk(bulk);
7873 + mutex_unlock(&state->bulk_transfer_mutex);
7875 + if (vchiq_core_msg_log_level >= VCHIQ_LOG_INFO) {
7876 + const char *header = (queue == &service->bulk_tx) ?
7877 + "Send Bulk to" : "Recv Bulk from";
7878 + if (bulk->actual != VCHIQ_BULK_ACTUAL_ABORTED)
7879 + vchiq_log_info(vchiq_core_msg_log_level,
7880 + "%s %c%c%c%c d:%d len:%d %x<->%x",
7882 + VCHIQ_FOURCC_AS_4CHARS(
7883 + service->base.fourcc),
7884 + service->remoteport,
7886 + (unsigned int)bulk->data,
7887 + (unsigned int)bulk->remote_data);
7889 + vchiq_log_info(vchiq_core_msg_log_level,
7890 + "%s %c%c%c%c d:%d ABORTED - tx len:%d,"
7891 + " rx len:%d %x<->%x",
7893 + VCHIQ_FOURCC_AS_4CHARS(
7894 + service->base.fourcc),
7895 + service->remoteport,
7897 + bulk->remote_size,
7898 + (unsigned int)bulk->data,
7899 + (unsigned int)bulk->remote_data);
7902 + vchiq_complete_bulk(bulk);
7909 +/* Called with the bulk_mutex held */
7911 +abort_outstanding_bulks(VCHIQ_SERVICE_T *service, VCHIQ_BULK_QUEUE_T *queue)
7913 + int is_tx = (queue == &service->bulk_tx);
7914 + vchiq_log_trace(vchiq_core_log_level,
7915 + "%d: aob:%d %cx - li=%x ri=%x p=%x",
7916 + service->state->id, service->localport, is_tx ? 't' : 'r',
7917 + queue->local_insert, queue->remote_insert, queue->process);
7919 + WARN_ON(!((int)(queue->local_insert - queue->process) >= 0));
7920 + WARN_ON(!((int)(queue->remote_insert - queue->process) >= 0));
7922 + while ((queue->process != queue->local_insert) ||
7923 + (queue->process != queue->remote_insert)) {
7924 + VCHIQ_BULK_T *bulk = &queue->bulks[BULK_INDEX(queue->process)];
7926 + if (queue->process == queue->remote_insert) {
7927 + /* fabricate a matching dummy bulk */
7928 + bulk->remote_data = NULL;
7929 + bulk->remote_size = 0;
7930 + queue->remote_insert++;
7933 + if (queue->process != queue->local_insert) {
7934 + vchiq_complete_bulk(bulk);
7936 + vchiq_log_info(vchiq_core_msg_log_level,
7937 + "%s %c%c%c%c d:%d ABORTED - tx len:%d, "
7939 + is_tx ? "Send Bulk to" : "Recv Bulk from",
7940 + VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
7941 + service->remoteport,
7943 + bulk->remote_size);
7945 + /* fabricate a matching dummy bulk */
7946 + bulk->data = NULL;
7948 + bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
7949 + bulk->dir = is_tx ? VCHIQ_BULK_TRANSMIT :
7950 + VCHIQ_BULK_RECEIVE;
7951 + queue->local_insert++;
7958 +/* Called from the slot handler thread */
7960 +pause_bulks(VCHIQ_STATE_T *state)
7962 + if (unlikely(atomic_inc_return(&pause_bulks_count) != 1)) {
7964 + atomic_set(&pause_bulks_count, 1);
7968 + /* Block bulk transfers from all services */
7969 + mutex_lock(&state->bulk_transfer_mutex);
7972 +/* Called from the slot handler thread */
7974 +resume_bulks(VCHIQ_STATE_T *state)
7977 + if (unlikely(atomic_dec_return(&pause_bulks_count) != 0)) {
7979 + atomic_set(&pause_bulks_count, 0);
7983 + /* Allow bulk transfers from all services */
7984 + mutex_unlock(&state->bulk_transfer_mutex);
7986 + if (state->deferred_bulks == 0)
7989 + /* Deal with any bulks which had to be deferred due to being in
7990 + * paused state. Don't try to match up to number of deferred bulks
7991 + * in case we've had something come and close the service in the
7992 + * interim - just process all bulk queues for all services */
7993 + vchiq_log_info(vchiq_core_log_level, "%s: processing %d deferred bulks",
7994 + __func__, state->deferred_bulks);
7996 + for (i = 0; i < state->unused_service; i++) {
7997 + VCHIQ_SERVICE_T *service = state->services[i];
7998 + int resolved_rx = 0;
7999 + int resolved_tx = 0;
8000 + if (!service || (service->srvstate != VCHIQ_SRVSTATE_OPEN))
8003 + mutex_lock(&service->bulk_mutex);
8004 + resolved_rx = resolve_bulks(service, &service->bulk_rx);
8005 + resolved_tx = resolve_bulks(service, &service->bulk_tx);
8006 + mutex_unlock(&service->bulk_mutex);
8008 + notify_bulks(service, &service->bulk_rx, 1);
8010 + notify_bulks(service, &service->bulk_tx, 1);
8012 + state->deferred_bulks = 0;
8016 +parse_open(VCHIQ_STATE_T *state, VCHIQ_HEADER_T *header)
8018 + VCHIQ_SERVICE_T *service = NULL;
8021 + unsigned int localport, remoteport;
8023 + msgid = header->msgid;
8024 + size = header->size;
8025 + type = VCHIQ_MSG_TYPE(msgid);
8026 + localport = VCHIQ_MSG_DSTPORT(msgid);
8027 + remoteport = VCHIQ_MSG_SRCPORT(msgid);
8028 + if (size >= sizeof(struct vchiq_open_payload)) {
8029 + const struct vchiq_open_payload *payload =
8030 + (struct vchiq_open_payload *)header->data;
8031 + unsigned int fourcc;
8033 + fourcc = payload->fourcc;
8034 + vchiq_log_info(vchiq_core_log_level,
8035 + "%d: prs OPEN@%x (%d->'%c%c%c%c')",
8036 + state->id, (unsigned int)header,
8038 + VCHIQ_FOURCC_AS_4CHARS(fourcc));
8040 + service = get_listening_service(state, fourcc);
8043 + /* A matching service exists */
8044 + short version = payload->version;
8045 + short version_min = payload->version_min;
8046 + if ((service->version < version_min) ||
8047 + (version < service->version_min)) {
8048 + /* Version mismatch */
8049 + vchiq_loud_error_header();
8050 + vchiq_loud_error("%d: service %d (%c%c%c%c) "
8051 + "version mismatch - local (%d, min %d)"
8052 + " vs. remote (%d, min %d)",
8053 + state->id, service->localport,
8054 + VCHIQ_FOURCC_AS_4CHARS(fourcc),
8055 + service->version, service->version_min,
8056 + version, version_min);
8057 + vchiq_loud_error_footer();
8058 + unlock_service(service);
8061 + service->peer_version = version;
8063 + if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
8064 + struct vchiq_openack_payload ack_payload = {
8067 + VCHIQ_ELEMENT_T body = {
8069 + sizeof(ack_payload)
8072 + /* Acknowledge the OPEN */
8073 + if (service->sync) {
8074 + if (queue_message_sync(state, NULL,
8076 + VCHIQ_MSG_OPENACK,
8077 + service->localport,
8079 + &body, 1, sizeof(ack_payload),
8080 + 0) == VCHIQ_RETRY)
8081 + goto bail_not_ready;
8083 + if (queue_message(state, NULL,
8085 + VCHIQ_MSG_OPENACK,
8086 + service->localport,
8088 + &body, 1, sizeof(ack_payload),
8089 + 0) == VCHIQ_RETRY)
8090 + goto bail_not_ready;
8093 + /* The service is now open */
8094 + vchiq_set_service_state(service,
8095 + service->sync ? VCHIQ_SRVSTATE_OPENSYNC
8096 + : VCHIQ_SRVSTATE_OPEN);
8099 + service->remoteport = remoteport;
8100 + service->client_id = ((int *)header->data)[1];
8101 + if (make_service_callback(service, VCHIQ_SERVICE_OPENED,
8102 + NULL, NULL) == VCHIQ_RETRY) {
8103 + /* Bail out if not ready */
8104 + service->remoteport = VCHIQ_PORT_FREE;
8105 + goto bail_not_ready;
8108 + /* Success - the message has been dealt with */
8109 + unlock_service(service);
8115 + /* No available service, or an invalid request - send a CLOSE */
8116 + if (queue_message(state, NULL,
8117 + VCHIQ_MAKE_MSG(VCHIQ_MSG_CLOSE, 0, VCHIQ_MSG_SRCPORT(msgid)),
8118 + NULL, 0, 0, 0) == VCHIQ_RETRY)
8119 + goto bail_not_ready;
8124 + unlock_service(service);
8129 +/* Called by the slot handler thread */
8131 +parse_rx_slots(VCHIQ_STATE_T *state)
8133 + VCHIQ_SHARED_STATE_T *remote = state->remote;
8134 + VCHIQ_SERVICE_T *service = NULL;
8136 + DEBUG_INITIALISE(state->local)
8138 + tx_pos = remote->tx_pos;
8140 + while (state->rx_pos != tx_pos) {
8141 + VCHIQ_HEADER_T *header;
8144 + unsigned int localport, remoteport;
8146 + DEBUG_TRACE(PARSE_LINE);
8147 + if (!state->rx_data) {
8149 + WARN_ON(!((state->rx_pos & VCHIQ_SLOT_MASK) == 0));
8150 + rx_index = remote->slot_queue[
8151 + SLOT_QUEUE_INDEX_FROM_POS(state->rx_pos) &
8152 + VCHIQ_SLOT_QUEUE_MASK];
8153 + state->rx_data = (char *)SLOT_DATA_FROM_INDEX(state,
8155 + state->rx_info = SLOT_INFO_FROM_INDEX(state, rx_index);
8157 + /* Initialise use_count to one, and increment
8158 + ** release_count at the end of the slot to avoid
8159 + ** releasing the slot prematurely. */
8160 + state->rx_info->use_count = 1;
8161 + state->rx_info->release_count = 0;
8164 + header = (VCHIQ_HEADER_T *)(state->rx_data +
8165 + (state->rx_pos & VCHIQ_SLOT_MASK));
8166 + DEBUG_VALUE(PARSE_HEADER, (int)header);
8167 + msgid = header->msgid;
8168 + DEBUG_VALUE(PARSE_MSGID, msgid);
8169 + size = header->size;
8170 + type = VCHIQ_MSG_TYPE(msgid);
8171 + localport = VCHIQ_MSG_DSTPORT(msgid);
8172 + remoteport = VCHIQ_MSG_SRCPORT(msgid);
8174 + if (type != VCHIQ_MSG_DATA)
8175 + VCHIQ_STATS_INC(state, ctrl_rx_count);
8178 + case VCHIQ_MSG_OPENACK:
8179 + case VCHIQ_MSG_CLOSE:
8180 + case VCHIQ_MSG_DATA:
8181 + case VCHIQ_MSG_BULK_RX:
8182 + case VCHIQ_MSG_BULK_TX:
8183 + case VCHIQ_MSG_BULK_RX_DONE:
8184 + case VCHIQ_MSG_BULK_TX_DONE:
8185 + service = find_service_by_port(state, localport);
8186 + if ((!service || service->remoteport != remoteport) &&
8187 + (localport == 0) &&
8188 + (type == VCHIQ_MSG_CLOSE)) {
8189 + /* This could be a CLOSE from a client which
8190 + hadn't yet received the OPENACK - look for
8191 + the connected service */
8193 + unlock_service(service);
8194 + service = get_connected_service(state,
8197 + vchiq_log_warning(vchiq_core_log_level,
8198 + "%d: prs %s@%x (%d->%d) - "
8199 + "found connected service %d",
8200 + state->id, msg_type_str(type),
8201 + (unsigned int)header,
8202 + remoteport, localport,
8203 + service->localport);
8207 + vchiq_log_error(vchiq_core_log_level,
8208 + "%d: prs %s@%x (%d->%d) - "
8209 + "invalid/closed service %d",
8210 + state->id, msg_type_str(type),
8211 + (unsigned int)header,
8212 + remoteport, localport, localport);
8213 + goto skip_message;
8220 + if (vchiq_core_msg_log_level >= VCHIQ_LOG_INFO) {
8223 + svc_fourcc = service
8224 + ? service->base.fourcc
8225 + : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
8226 + vchiq_log_info(vchiq_core_msg_log_level,
8227 + "Rcvd Msg %s(%u) from %c%c%c%c s:%d d:%d "
8229 + msg_type_str(type), type,
8230 + VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
8231 + remoteport, localport, size);
8233 + vchiq_log_dump_mem("Rcvd", 0, header->data,
8237 + if (((unsigned int)header & VCHIQ_SLOT_MASK) + calc_stride(size)
8238 + > VCHIQ_SLOT_SIZE) {
8239 + vchiq_log_error(vchiq_core_log_level,
8240 + "header %x (msgid %x) - size %x too big for "
8242 + (unsigned int)header, (unsigned int)msgid,
8243 + (unsigned int)size);
8244 + WARN(1, "oversized for slot\n");
8248 + case VCHIQ_MSG_OPEN:
8249 + WARN_ON(!(VCHIQ_MSG_DSTPORT(msgid) == 0));
8250 + if (!parse_open(state, header))
8251 + goto bail_not_ready;
8253 + case VCHIQ_MSG_OPENACK:
8254 + if (size >= sizeof(struct vchiq_openack_payload)) {
8255 + const struct vchiq_openack_payload *payload =
8256 + (struct vchiq_openack_payload *)
8258 + service->peer_version = payload->version;
8260 + vchiq_log_info(vchiq_core_log_level,
8261 + "%d: prs OPENACK@%x,%x (%d->%d) v:%d",
8262 + state->id, (unsigned int)header, size,
8263 + remoteport, localport, service->peer_version);
8264 + if (service->srvstate ==
8265 + VCHIQ_SRVSTATE_OPENING) {
8266 + service->remoteport = remoteport;
8267 + vchiq_set_service_state(service,
8268 + VCHIQ_SRVSTATE_OPEN);
8269 + up(&service->remove_event);
8271 + vchiq_log_error(vchiq_core_log_level,
8272 + "OPENACK received in state %s",
8273 + srvstate_names[service->srvstate]);
8275 + case VCHIQ_MSG_CLOSE:
8276 + WARN_ON(size != 0); /* There should be no data */
8278 + vchiq_log_info(vchiq_core_log_level,
8279 + "%d: prs CLOSE@%x (%d->%d)",
8280 + state->id, (unsigned int)header,
8281 + remoteport, localport);
8283 + mark_service_closing_internal(service, 1);
8285 + if (vchiq_close_service_internal(service,
8286 + 1/*close_recvd*/) == VCHIQ_RETRY)
8287 + goto bail_not_ready;
8289 + vchiq_log_info(vchiq_core_log_level,
8290 + "Close Service %c%c%c%c s:%u d:%d",
8291 + VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
8292 + service->localport,
8293 + service->remoteport);
8295 + case VCHIQ_MSG_DATA:
8296 + vchiq_log_trace(vchiq_core_log_level,
8297 + "%d: prs DATA@%x,%x (%d->%d)",
8298 + state->id, (unsigned int)header, size,
8299 + remoteport, localport);
8301 + if ((service->remoteport == remoteport)
8302 + && (service->srvstate ==
8303 + VCHIQ_SRVSTATE_OPEN)) {
8304 + header->msgid = msgid | VCHIQ_MSGID_CLAIMED;
8305 + claim_slot(state->rx_info);
8306 + DEBUG_TRACE(PARSE_LINE);
8307 + if (make_service_callback(service,
8308 + VCHIQ_MESSAGE_AVAILABLE, header,
8309 + NULL) == VCHIQ_RETRY) {
8310 + DEBUG_TRACE(PARSE_LINE);
8311 + goto bail_not_ready;
8313 + VCHIQ_SERVICE_STATS_INC(service, ctrl_rx_count);
8314 + VCHIQ_SERVICE_STATS_ADD(service, ctrl_rx_bytes,
8317 + VCHIQ_STATS_INC(state, error_count);
8320 + case VCHIQ_MSG_CONNECT:
8321 + vchiq_log_info(vchiq_core_log_level,
8322 + "%d: prs CONNECT@%x",
8323 + state->id, (unsigned int)header);
8324 + up(&state->connect);
8326 + case VCHIQ_MSG_BULK_RX:
8327 + case VCHIQ_MSG_BULK_TX: {
8328 + VCHIQ_BULK_QUEUE_T *queue;
8329 + WARN_ON(!state->is_master);
8330 + queue = (type == VCHIQ_MSG_BULK_RX) ?
8331 + &service->bulk_tx : &service->bulk_rx;
8332 + if ((service->remoteport == remoteport)
8333 + && (service->srvstate ==
8334 + VCHIQ_SRVSTATE_OPEN)) {
8335 + VCHIQ_BULK_T *bulk;
8338 + DEBUG_TRACE(PARSE_LINE);
8339 + if (mutex_lock_interruptible(
8340 + &service->bulk_mutex) != 0) {
8341 + DEBUG_TRACE(PARSE_LINE);
8342 + goto bail_not_ready;
8345 + WARN_ON(!(queue->remote_insert < queue->remove +
8346 + VCHIQ_NUM_SERVICE_BULKS));
8347 + bulk = &queue->bulks[
8348 + BULK_INDEX(queue->remote_insert)];
8349 + bulk->remote_data =
8350 + (void *)((int *)header->data)[0];
8351 + bulk->remote_size = ((int *)header->data)[1];
8354 + vchiq_log_info(vchiq_core_log_level,
8355 + "%d: prs %s@%x (%d->%d) %x@%x",
8356 + state->id, msg_type_str(type),
8357 + (unsigned int)header,
8358 + remoteport, localport,
8359 + bulk->remote_size,
8360 + (unsigned int)bulk->remote_data);
8362 + queue->remote_insert++;
8364 + if (atomic_read(&pause_bulks_count)) {
8365 + state->deferred_bulks++;
8366 + vchiq_log_info(vchiq_core_log_level,
8367 + "%s: deferring bulk (%d)",
8369 + state->deferred_bulks);
8370 + if (state->conn_state !=
8371 + VCHIQ_CONNSTATE_PAUSE_SENT)
8373 + vchiq_core_log_level,
8374 + "%s: bulks paused in "
8375 + "unexpected state %s",
8378 + state->conn_state]);
8379 + } else if (state->conn_state ==
8380 + VCHIQ_CONNSTATE_CONNECTED) {
8381 + DEBUG_TRACE(PARSE_LINE);
8382 + resolved = resolve_bulks(service,
8386 + mutex_unlock(&service->bulk_mutex);
8388 + notify_bulks(service, queue,
8392 + case VCHIQ_MSG_BULK_RX_DONE:
8393 + case VCHIQ_MSG_BULK_TX_DONE:
8394 + WARN_ON(state->is_master);
8395 + if ((service->remoteport == remoteport)
8396 + && (service->srvstate !=
8397 + VCHIQ_SRVSTATE_FREE)) {
8398 + VCHIQ_BULK_QUEUE_T *queue;
8399 + VCHIQ_BULK_T *bulk;
8401 + queue = (type == VCHIQ_MSG_BULK_RX_DONE) ?
8402 + &service->bulk_rx : &service->bulk_tx;
8404 + DEBUG_TRACE(PARSE_LINE);
8405 + if (mutex_lock_interruptible(
8406 + &service->bulk_mutex) != 0) {
8407 + DEBUG_TRACE(PARSE_LINE);
8408 + goto bail_not_ready;
8410 + if ((int)(queue->remote_insert -
8411 + queue->local_insert) >= 0) {
8412 + vchiq_log_error(vchiq_core_log_level,
8413 + "%d: prs %s@%x (%d->%d) "
8414 + "unexpected (ri=%d,li=%d)",
8415 + state->id, msg_type_str(type),
8416 + (unsigned int)header,
8417 + remoteport, localport,
8418 + queue->remote_insert,
8419 + queue->local_insert);
8420 + mutex_unlock(&service->bulk_mutex);
8424 + BUG_ON(queue->process == queue->local_insert);
8425 + BUG_ON(queue->process != queue->remote_insert);
8427 + bulk = &queue->bulks[
8428 + BULK_INDEX(queue->remote_insert)];
8429 + bulk->actual = *(int *)header->data;
8430 + queue->remote_insert++;
8432 + vchiq_log_info(vchiq_core_log_level,
8433 + "%d: prs %s@%x (%d->%d) %x@%x",
8434 + state->id, msg_type_str(type),
8435 + (unsigned int)header,
8436 + remoteport, localport,
8437 + bulk->actual, (unsigned int)bulk->data);
8439 + vchiq_log_trace(vchiq_core_log_level,
8440 + "%d: prs:%d %cx li=%x ri=%x p=%x",
8441 + state->id, localport,
8442 + (type == VCHIQ_MSG_BULK_RX_DONE) ?
8444 + queue->local_insert,
8445 + queue->remote_insert, queue->process);
8447 + DEBUG_TRACE(PARSE_LINE);
8448 + WARN_ON(queue->process == queue->local_insert);
8449 + vchiq_complete_bulk(bulk);
8451 + mutex_unlock(&service->bulk_mutex);
8452 + DEBUG_TRACE(PARSE_LINE);
8453 + notify_bulks(service, queue, 1/*retry_poll*/);
8454 + DEBUG_TRACE(PARSE_LINE);
8457 + case VCHIQ_MSG_PADDING:
8458 + vchiq_log_trace(vchiq_core_log_level,
8459 + "%d: prs PADDING@%x,%x",
8460 + state->id, (unsigned int)header, size);
8462 + case VCHIQ_MSG_PAUSE:
8463 + /* If initiated, signal the application thread */
8464 + vchiq_log_trace(vchiq_core_log_level,
8465 + "%d: prs PAUSE@%x,%x",
8466 + state->id, (unsigned int)header, size);
8467 + if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) {
8468 + vchiq_log_error(vchiq_core_log_level,
8469 + "%d: PAUSE received in state PAUSED",
8473 + if (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT) {
8474 + /* Send a PAUSE in response */
8475 + if (queue_message(state, NULL,
8476 + VCHIQ_MAKE_MSG(VCHIQ_MSG_PAUSE, 0, 0),
8477 + NULL, 0, 0, 0) == VCHIQ_RETRY)
8478 + goto bail_not_ready;
8479 + if (state->is_master)
8480 + pause_bulks(state);
8482 + /* At this point slot_mutex is held */
8483 + vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSED);
8484 + vchiq_platform_paused(state);
8486 + case VCHIQ_MSG_RESUME:
8487 + vchiq_log_trace(vchiq_core_log_level,
8488 + "%d: prs RESUME@%x,%x",
8489 + state->id, (unsigned int)header, size);
8490 + /* Release the slot mutex */
8491 + mutex_unlock(&state->slot_mutex);
8492 + if (state->is_master)
8493 + resume_bulks(state);
8494 + vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
8495 + vchiq_platform_resumed(state);
8498 + case VCHIQ_MSG_REMOTE_USE:
8499 + vchiq_on_remote_use(state);
8501 + case VCHIQ_MSG_REMOTE_RELEASE:
8502 + vchiq_on_remote_release(state);
8504 + case VCHIQ_MSG_REMOTE_USE_ACTIVE:
8505 + vchiq_on_remote_use_active(state);
8509 + vchiq_log_error(vchiq_core_log_level,
8510 + "%d: prs invalid msgid %x@%x,%x",
8511 + state->id, msgid, (unsigned int)header, size);
8512 + WARN(1, "invalid message\n");
8518 + unlock_service(service);
8522 + state->rx_pos += calc_stride(size);
8524 + DEBUG_TRACE(PARSE_LINE);
8525 + /* Perform some housekeeping when the end of the slot is
8527 + if ((state->rx_pos & VCHIQ_SLOT_MASK) == 0) {
8528 + /* Remove the extra reference count. */
8529 + release_slot(state, state->rx_info, NULL, NULL);
8530 + state->rx_data = NULL;
8536 + unlock_service(service);
8539 +/* Called by the slot handler thread */
8541 +slot_handler_func(void *v)
8543 + VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
8544 + VCHIQ_SHARED_STATE_T *local = state->local;
8545 + DEBUG_INITIALISE(local)
8548 + DEBUG_COUNT(SLOT_HANDLER_COUNT);
8549 + DEBUG_TRACE(SLOT_HANDLER_LINE);
8550 + remote_event_wait(&local->trigger);
8554 + DEBUG_TRACE(SLOT_HANDLER_LINE);
8555 + if (state->poll_needed) {
8556 + /* Check if we need to suspend - may change our
8558 + vchiq_platform_check_suspend(state);
8560 + state->poll_needed = 0;
8562 + /* Handle service polling and other rare conditions here
8563 + ** out of the mainline code */
8564 + switch (state->conn_state) {
8565 + case VCHIQ_CONNSTATE_CONNECTED:
8566 + /* Poll the services as requested */
8567 + poll_services(state);
8570 + case VCHIQ_CONNSTATE_PAUSING:
8571 + if (state->is_master)
8572 + pause_bulks(state);
8573 + if (queue_message(state, NULL,
8574 + VCHIQ_MAKE_MSG(VCHIQ_MSG_PAUSE, 0, 0),
8575 + NULL, 0, 0, 0) != VCHIQ_RETRY) {
8576 + vchiq_set_conn_state(state,
8577 + VCHIQ_CONNSTATE_PAUSE_SENT);
8579 + if (state->is_master)
8580 + resume_bulks(state);
8582 + state->poll_needed = 1;
8586 + case VCHIQ_CONNSTATE_PAUSED:
8587 + vchiq_platform_resume(state);
8590 + case VCHIQ_CONNSTATE_RESUMING:
8591 + if (queue_message(state, NULL,
8592 + VCHIQ_MAKE_MSG(VCHIQ_MSG_RESUME, 0, 0),
8593 + NULL, 0, 0, 0) != VCHIQ_RETRY) {
8594 + if (state->is_master)
8595 + resume_bulks(state);
8596 + vchiq_set_conn_state(state,
8597 + VCHIQ_CONNSTATE_CONNECTED);
8598 + vchiq_platform_resumed(state);
8600 + /* This should really be impossible,
8601 + ** since the PAUSE should have flushed
8602 + ** through outstanding messages. */
8603 + vchiq_log_error(vchiq_core_log_level,
8604 + "Failed to send RESUME "
8610 + case VCHIQ_CONNSTATE_PAUSE_TIMEOUT:
8611 + case VCHIQ_CONNSTATE_RESUME_TIMEOUT:
8612 + vchiq_platform_handle_timeout(state);
8621 + DEBUG_TRACE(SLOT_HANDLER_LINE);
8622 + parse_rx_slots(state);
8628 +/* Called by the recycle thread */
8630 +recycle_func(void *v)
8632 + VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
8633 + VCHIQ_SHARED_STATE_T *local = state->local;
8636 + remote_event_wait(&local->recycle);
8638 + process_free_queue(state);
8644 +/* Called by the sync thread */
8648 + VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
8649 + VCHIQ_SHARED_STATE_T *local = state->local;
8650 + VCHIQ_HEADER_T *header = (VCHIQ_HEADER_T *)SLOT_DATA_FROM_INDEX(state,
8651 + state->remote->slot_sync);
8654 + VCHIQ_SERVICE_T *service;
8657 + unsigned int localport, remoteport;
8659 + remote_event_wait(&local->sync_trigger);
8663 + msgid = header->msgid;
8664 + size = header->size;
8665 + type = VCHIQ_MSG_TYPE(msgid);
8666 + localport = VCHIQ_MSG_DSTPORT(msgid);
8667 + remoteport = VCHIQ_MSG_SRCPORT(msgid);
8669 + service = find_service_by_port(state, localport);
8672 + vchiq_log_error(vchiq_sync_log_level,
8673 + "%d: sf %s@%x (%d->%d) - "
8674 + "invalid/closed service %d",
8675 + state->id, msg_type_str(type),
8676 + (unsigned int)header,
8677 + remoteport, localport, localport);
8678 + release_message_sync(state, header);
8682 + if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) {
8685 + svc_fourcc = service
8686 + ? service->base.fourcc
8687 + : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
8688 + vchiq_log_trace(vchiq_sync_log_level,
8689 + "Rcvd Msg %s from %c%c%c%c s:%d d:%d len:%d",
8690 + msg_type_str(type),
8691 + VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
8692 + remoteport, localport, size);
8694 + vchiq_log_dump_mem("Rcvd", 0, header->data,
8699 + case VCHIQ_MSG_OPENACK:
8700 + if (size >= sizeof(struct vchiq_openack_payload)) {
8701 + const struct vchiq_openack_payload *payload =
8702 + (struct vchiq_openack_payload *)
8704 + service->peer_version = payload->version;
8706 + vchiq_log_info(vchiq_sync_log_level,
8707 + "%d: sf OPENACK@%x,%x (%d->%d) v:%d",
8708 + state->id, (unsigned int)header, size,
8709 + remoteport, localport, service->peer_version);
8710 + if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
8711 + service->remoteport = remoteport;
8712 + vchiq_set_service_state(service,
8713 + VCHIQ_SRVSTATE_OPENSYNC);
8714 + up(&service->remove_event);
8716 + release_message_sync(state, header);
8719 + case VCHIQ_MSG_DATA:
8720 + vchiq_log_trace(vchiq_sync_log_level,
8721 + "%d: sf DATA@%x,%x (%d->%d)",
8722 + state->id, (unsigned int)header, size,
8723 + remoteport, localport);
8725 + if ((service->remoteport == remoteport) &&
8726 + (service->srvstate ==
8727 + VCHIQ_SRVSTATE_OPENSYNC)) {
8728 + if (make_service_callback(service,
8729 + VCHIQ_MESSAGE_AVAILABLE, header,
8730 + NULL) == VCHIQ_RETRY)
8731 + vchiq_log_error(vchiq_sync_log_level,
8732 + "synchronous callback to "
8733 + "service %d returns "
8740 + vchiq_log_error(vchiq_sync_log_level,
8741 + "%d: sf unexpected msgid %x@%x,%x",
8742 + state->id, msgid, (unsigned int)header, size);
8743 + release_message_sync(state, header);
8747 + unlock_service(service);
8755 +init_bulk_queue(VCHIQ_BULK_QUEUE_T *queue)
8757 + queue->local_insert = 0;
8758 + queue->remote_insert = 0;
8759 + queue->process = 0;
8760 + queue->remote_notify = 0;
8761 + queue->remove = 0;
8765 +inline const char *
8766 +get_conn_state_name(VCHIQ_CONNSTATE_T conn_state)
8768 + return conn_state_names[conn_state];
8772 +VCHIQ_SLOT_ZERO_T *
8773 +vchiq_init_slots(void *mem_base, int mem_size)
8775 + int mem_align = (VCHIQ_SLOT_SIZE - (int)mem_base) & VCHIQ_SLOT_MASK;
8776 + VCHIQ_SLOT_ZERO_T *slot_zero =
8777 + (VCHIQ_SLOT_ZERO_T *)((char *)mem_base + mem_align);
8778 + int num_slots = (mem_size - mem_align)/VCHIQ_SLOT_SIZE;
8779 + int first_data_slot = VCHIQ_SLOT_ZERO_SLOTS;
8781 + /* Ensure there is enough memory to run an absolutely minimum system */
8782 + num_slots -= first_data_slot;
8784 + if (num_slots < 4) {
8785 + vchiq_log_error(vchiq_core_log_level,
8786 + "vchiq_init_slots - insufficient memory %x bytes",
8791 + memset(slot_zero, 0, sizeof(VCHIQ_SLOT_ZERO_T));
8793 + slot_zero->magic = VCHIQ_MAGIC;
8794 + slot_zero->version = VCHIQ_VERSION;
8795 + slot_zero->version_min = VCHIQ_VERSION_MIN;
8796 + slot_zero->slot_zero_size = sizeof(VCHIQ_SLOT_ZERO_T);
8797 + slot_zero->slot_size = VCHIQ_SLOT_SIZE;
8798 + slot_zero->max_slots = VCHIQ_MAX_SLOTS;
8799 + slot_zero->max_slots_per_side = VCHIQ_MAX_SLOTS_PER_SIDE;
8801 + slot_zero->master.slot_sync = first_data_slot;
8802 + slot_zero->master.slot_first = first_data_slot + 1;
8803 + slot_zero->master.slot_last = first_data_slot + (num_slots/2) - 1;
8804 + slot_zero->slave.slot_sync = first_data_slot + (num_slots/2);
8805 + slot_zero->slave.slot_first = first_data_slot + (num_slots/2) + 1;
8806 + slot_zero->slave.slot_last = first_data_slot + num_slots - 1;
8812 +vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
8815 + VCHIQ_SHARED_STATE_T *local;
8816 + VCHIQ_SHARED_STATE_T *remote;
8817 + VCHIQ_STATUS_T status;
8818 + char threadname[10];
8822 + vchiq_log_warning(vchiq_core_log_level,
8823 + "%s: slot_zero = 0x%08lx, is_master = %d",
8824 + __func__, (unsigned long)slot_zero, is_master);
8826 + /* Check the input configuration */
8828 + if (slot_zero->magic != VCHIQ_MAGIC) {
8829 + vchiq_loud_error_header();
8830 + vchiq_loud_error("Invalid VCHIQ magic value found.");
8831 + vchiq_loud_error("slot_zero=%x: magic=%x (expected %x)",
8832 + (unsigned int)slot_zero, slot_zero->magic, VCHIQ_MAGIC);
8833 + vchiq_loud_error_footer();
8834 + return VCHIQ_ERROR;
8837 + if (slot_zero->version < VCHIQ_VERSION_MIN) {
8838 + vchiq_loud_error_header();
8839 + vchiq_loud_error("Incompatible VCHIQ versions found.");
8840 + vchiq_loud_error("slot_zero=%x: VideoCore version=%d "
8842 + (unsigned int)slot_zero, slot_zero->version,
8843 + VCHIQ_VERSION_MIN);
8844 + vchiq_loud_error("Restart with a newer VideoCore image.");
8845 + vchiq_loud_error_footer();
8846 + return VCHIQ_ERROR;
8849 + if (VCHIQ_VERSION < slot_zero->version_min) {
8850 + vchiq_loud_error_header();
8851 + vchiq_loud_error("Incompatible VCHIQ versions found.");
8852 + vchiq_loud_error("slot_zero=%x: version=%d (VideoCore "
8854 + (unsigned int)slot_zero, VCHIQ_VERSION,
8855 + slot_zero->version_min);
8856 + vchiq_loud_error("Restart with a newer kernel.");
8857 + vchiq_loud_error_footer();
8858 + return VCHIQ_ERROR;
8861 + if ((slot_zero->slot_zero_size != sizeof(VCHIQ_SLOT_ZERO_T)) ||
8862 + (slot_zero->slot_size != VCHIQ_SLOT_SIZE) ||
8863 + (slot_zero->max_slots != VCHIQ_MAX_SLOTS) ||
8864 + (slot_zero->max_slots_per_side != VCHIQ_MAX_SLOTS_PER_SIDE)) {
8865 + vchiq_loud_error_header();
8866 + if (slot_zero->slot_zero_size != sizeof(VCHIQ_SLOT_ZERO_T))
8867 + vchiq_loud_error("slot_zero=%x: slot_zero_size=%x "
8869 + (unsigned int)slot_zero,
8870 + slot_zero->slot_zero_size,
8871 + sizeof(VCHIQ_SLOT_ZERO_T));
8872 + if (slot_zero->slot_size != VCHIQ_SLOT_SIZE)
8873 + vchiq_loud_error("slot_zero=%x: slot_size=%d "
8875 + (unsigned int)slot_zero, slot_zero->slot_size,
8877 + if (slot_zero->max_slots != VCHIQ_MAX_SLOTS)
8878 + vchiq_loud_error("slot_zero=%x: max_slots=%d "
8880 + (unsigned int)slot_zero, slot_zero->max_slots,
8882 + if (slot_zero->max_slots_per_side != VCHIQ_MAX_SLOTS_PER_SIDE)
8883 + vchiq_loud_error("slot_zero=%x: max_slots_per_side=%d "
8885 + (unsigned int)slot_zero,
8886 + slot_zero->max_slots_per_side,
8887 + VCHIQ_MAX_SLOTS_PER_SIDE);
8888 + vchiq_loud_error_footer();
8889 + return VCHIQ_ERROR;
8893 + local = &slot_zero->master;
8894 + remote = &slot_zero->slave;
8896 + local = &slot_zero->slave;
8897 + remote = &slot_zero->master;
8900 + if (local->initialised) {
8901 + vchiq_loud_error_header();
8902 + if (remote->initialised)
8903 + vchiq_loud_error("local state has already been "
8906 + vchiq_loud_error("master/slave mismatch - two %ss",
8907 + is_master ? "master" : "slave");
8908 + vchiq_loud_error_footer();
8909 + return VCHIQ_ERROR;
8912 + memset(state, 0, sizeof(VCHIQ_STATE_T));
8915 + state->is_master = is_master;
8918 + initialize shared state pointers
8921 + state->local = local;
8922 + state->remote = remote;
8923 + state->slot_data = (VCHIQ_SLOT_T *)slot_zero;
8926 + initialize events and mutexes
8929 + sema_init(&state->connect, 0);
8930 + mutex_init(&state->mutex);
8931 + sema_init(&state->trigger_event, 0);
8932 + sema_init(&state->recycle_event, 0);
8933 + sema_init(&state->sync_trigger_event, 0);
8934 + sema_init(&state->sync_release_event, 0);
8936 + mutex_init(&state->slot_mutex);
8937 + mutex_init(&state->recycle_mutex);
8938 + mutex_init(&state->sync_mutex);
8939 + mutex_init(&state->bulk_transfer_mutex);
8941 + sema_init(&state->slot_available_event, 0);
8942 + sema_init(&state->slot_remove_event, 0);
8943 + sema_init(&state->data_quota_event, 0);
8945 + state->slot_queue_available = 0;
8947 + for (i = 0; i < VCHIQ_MAX_SERVICES; i++) {
8948 + VCHIQ_SERVICE_QUOTA_T *service_quota =
8949 + &state->service_quotas[i];
8950 + sema_init(&service_quota->quota_event, 0);
8953 + for (i = local->slot_first; i <= local->slot_last; i++) {
8954 + local->slot_queue[state->slot_queue_available++] = i;
8955 + up(&state->slot_available_event);
8958 + state->default_slot_quota = state->slot_queue_available/2;
8959 + state->default_message_quota =
8960 + min((unsigned short)(state->default_slot_quota * 256),
8961 + (unsigned short)~0);
8963 + state->previous_data_index = -1;
8964 + state->data_use_count = 0;
8965 + state->data_quota = state->slot_queue_available - 1;
8967 + local->trigger.event = &state->trigger_event;
8968 + remote_event_create(&local->trigger);
8969 + local->tx_pos = 0;
8971 + local->recycle.event = &state->recycle_event;
8972 + remote_event_create(&local->recycle);
8973 + local->slot_queue_recycle = state->slot_queue_available;
8975 + local->sync_trigger.event = &state->sync_trigger_event;
8976 + remote_event_create(&local->sync_trigger);
8978 + local->sync_release.event = &state->sync_release_event;
8979 + remote_event_create(&local->sync_release);
8981 + /* At start-of-day, the slot is empty and available */
8982 + ((VCHIQ_HEADER_T *)SLOT_DATA_FROM_INDEX(state, local->slot_sync))->msgid
8983 + = VCHIQ_MSGID_PADDING;
8984 + remote_event_signal_local(&local->sync_release);
8986 + local->debug[DEBUG_ENTRIES] = DEBUG_MAX;
8988 + status = vchiq_platform_init_state(state);
8991 + bring up slot handler thread
8993 + snprintf(threadname, sizeof(threadname), "VCHIQ-%d", state->id);
8994 + state->slot_handler_thread = kthread_create(&slot_handler_func,
8998 + if (state->slot_handler_thread == NULL) {
8999 + vchiq_loud_error_header();
9000 + vchiq_loud_error("couldn't create thread %s", threadname);
9001 + vchiq_loud_error_footer();
9002 + return VCHIQ_ERROR;
9004 + set_user_nice(state->slot_handler_thread, -19);
9005 + wake_up_process(state->slot_handler_thread);
9007 + snprintf(threadname, sizeof(threadname), "VCHIQr-%d", state->id);
9008 + state->recycle_thread = kthread_create(&recycle_func,
9011 + if (state->recycle_thread == NULL) {
9012 + vchiq_loud_error_header();
9013 + vchiq_loud_error("couldn't create thread %s", threadname);
9014 + vchiq_loud_error_footer();
9015 + return VCHIQ_ERROR;
9017 + set_user_nice(state->recycle_thread, -19);
9018 + wake_up_process(state->recycle_thread);
9020 + snprintf(threadname, sizeof(threadname), "VCHIQs-%d", state->id);
9021 + state->sync_thread = kthread_create(&sync_func,
9024 + if (state->sync_thread == NULL) {
9025 + vchiq_loud_error_header();
9026 + vchiq_loud_error("couldn't create thread %s", threadname);
9027 + vchiq_loud_error_footer();
9028 + return VCHIQ_ERROR;
9030 + set_user_nice(state->sync_thread, -20);
9031 + wake_up_process(state->sync_thread);
9033 + BUG_ON(state->id >= VCHIQ_MAX_STATES);
9034 + vchiq_states[state->id] = state;
9036 + /* Indicate readiness to the other side */
9037 + local->initialised = 1;
9042 +/* Called from application thread when a client or server service is created. */
9044 +vchiq_add_service_internal(VCHIQ_STATE_T *state,
9045 + const VCHIQ_SERVICE_PARAMS_T *params, int srvstate,
9046 + VCHIQ_INSTANCE_T instance)
9048 + VCHIQ_SERVICE_T *service;
9050 + service = kmalloc(sizeof(VCHIQ_SERVICE_T), GFP_KERNEL);
9052 + service->base.fourcc = params->fourcc;
9053 + service->base.callback = params->callback;
9054 + service->base.userdata = params->userdata;
9055 + service->handle = VCHIQ_SERVICE_HANDLE_INVALID;
9056 + service->ref_count = 1;
9057 + service->srvstate = VCHIQ_SRVSTATE_FREE;
9058 + service->localport = VCHIQ_PORT_FREE;
9059 + service->remoteport = VCHIQ_PORT_FREE;
9061 + service->public_fourcc = (srvstate == VCHIQ_SRVSTATE_OPENING) ?
9062 + VCHIQ_FOURCC_INVALID : params->fourcc;
9063 + service->client_id = 0;
9064 + service->auto_close = 1;
9065 + service->sync = 0;
9066 + service->closing = 0;
9067 + atomic_set(&service->poll_flags, 0);
9068 + service->version = params->version;
9069 + service->version_min = params->version_min;
9070 + service->state = state;
9071 + service->instance = instance;
9072 + service->service_use_count = 0;
9073 + init_bulk_queue(&service->bulk_tx);
9074 + init_bulk_queue(&service->bulk_rx);
9075 + sema_init(&service->remove_event, 0);
9076 + sema_init(&service->bulk_remove_event, 0);
9077 + mutex_init(&service->bulk_mutex);
9078 + memset(&service->stats, 0, sizeof(service->stats));
9080 + vchiq_log_error(vchiq_core_log_level,
9085 + VCHIQ_SERVICE_T **pservice = NULL;
9088 + /* Although it is perfectly possible to use service_spinlock
9089 + ** to protect the creation of services, it is overkill as it
9090 + ** disables interrupts while the array is searched.
9091 + ** The only danger is of another thread trying to create a
9092 + ** service - service deletion is safe.
9093 + ** Therefore it is preferable to use state->mutex which,
9094 + ** although slower to claim, doesn't block interrupts while
9098 + mutex_lock(&state->mutex);
9100 + /* Prepare to use a previously unused service */
9101 + if (state->unused_service < VCHIQ_MAX_SERVICES)
9102 + pservice = &state->services[state->unused_service];
9104 + if (srvstate == VCHIQ_SRVSTATE_OPENING) {
9105 + for (i = 0; i < state->unused_service; i++) {
9106 + VCHIQ_SERVICE_T *srv = state->services[i];
9108 + pservice = &state->services[i];
9113 + for (i = (state->unused_service - 1); i >= 0; i--) {
9114 + VCHIQ_SERVICE_T *srv = state->services[i];
9116 + pservice = &state->services[i];
9117 + else if ((srv->public_fourcc == params->fourcc)
9118 + && ((srv->instance != instance) ||
9119 + (srv->base.callback !=
9120 + params->callback))) {
9121 + /* There is another server using this
9122 + ** fourcc which doesn't match. */
9130 + service->localport = (pservice - state->services);
9132 + handle_seq = VCHIQ_MAX_STATES *
9133 + VCHIQ_MAX_SERVICES;
9134 + service->handle = handle_seq |
9135 + (state->id * VCHIQ_MAX_SERVICES) |
9136 + service->localport;
9137 + handle_seq += VCHIQ_MAX_STATES * VCHIQ_MAX_SERVICES;
9138 + *pservice = service;
9139 + if (pservice == &state->services[state->unused_service])
9140 + state->unused_service++;
9143 + mutex_unlock(&state->mutex);
9152 + VCHIQ_SERVICE_QUOTA_T *service_quota =
9153 + &state->service_quotas[service->localport];
9154 + service_quota->slot_quota = state->default_slot_quota;
9155 + service_quota->message_quota = state->default_message_quota;
9156 + if (service_quota->slot_use_count == 0)
9157 + service_quota->previous_tx_index =
9158 + SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos)
9161 + /* Bring this service online */
9162 + vchiq_set_service_state(service, srvstate);
9164 + vchiq_log_info(vchiq_core_msg_log_level,
9165 + "%s Service %c%c%c%c SrcPort:%d",
9166 + (srvstate == VCHIQ_SRVSTATE_OPENING)
9168 + VCHIQ_FOURCC_AS_4CHARS(params->fourcc),
9169 + service->localport);
9172 + /* Don't unlock the service - leave it with a ref_count of 1. */
9178 +vchiq_open_service_internal(VCHIQ_SERVICE_T *service, int client_id)
9180 + struct vchiq_open_payload payload = {
9181 + service->base.fourcc,
9184 + service->version_min
9186 + VCHIQ_ELEMENT_T body = { &payload, sizeof(payload) };
9187 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
9189 + service->client_id = client_id;
9190 + vchiq_use_service_internal(service);
9191 + status = queue_message(service->state, NULL,
9192 + VCHIQ_MAKE_MSG(VCHIQ_MSG_OPEN, service->localport, 0),
9193 + &body, 1, sizeof(payload), 1);
9194 + if (status == VCHIQ_SUCCESS) {
9195 + if (down_interruptible(&service->remove_event) != 0) {
9196 + status = VCHIQ_RETRY;
9197 + vchiq_release_service_internal(service);
9198 + } else if ((service->srvstate != VCHIQ_SRVSTATE_OPEN) &&
9199 + (service->srvstate != VCHIQ_SRVSTATE_OPENSYNC)) {
9200 + if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT)
9201 + vchiq_log_error(vchiq_core_log_level,
9202 + "%d: osi - srvstate = %s (ref %d)",
9203 + service->state->id,
9204 + srvstate_names[service->srvstate],
9205 + service->ref_count);
9206 + status = VCHIQ_ERROR;
9207 + VCHIQ_SERVICE_STATS_INC(service, error_count);
9208 + vchiq_release_service_internal(service);
9215 +release_service_messages(VCHIQ_SERVICE_T *service)
9217 + VCHIQ_STATE_T *state = service->state;
9218 + int slot_last = state->remote->slot_last;
9221 + /* Release any claimed messages */
9222 + for (i = state->remote->slot_first; i <= slot_last; i++) {
9223 + VCHIQ_SLOT_INFO_T *slot_info =
9224 + SLOT_INFO_FROM_INDEX(state, i);
9225 + if (slot_info->release_count != slot_info->use_count) {
9227 + (char *)SLOT_DATA_FROM_INDEX(state, i);
9228 + unsigned int pos, end;
9230 + end = VCHIQ_SLOT_SIZE;
9231 + if (data == state->rx_data)
9232 + /* This buffer is still being read from - stop
9233 + ** at the current read position */
9234 + end = state->rx_pos & VCHIQ_SLOT_MASK;
9238 + while (pos < end) {
9239 + VCHIQ_HEADER_T *header =
9240 + (VCHIQ_HEADER_T *)(data + pos);
9241 + int msgid = header->msgid;
9242 + int port = VCHIQ_MSG_DSTPORT(msgid);
9243 + if ((port == service->localport) &&
9244 + (msgid & VCHIQ_MSGID_CLAIMED)) {
9245 + vchiq_log_info(vchiq_core_log_level,
9247 + (unsigned int)header);
9248 + release_slot(state, slot_info, header,
9251 + pos += calc_stride(header->size);
9252 + if (pos > VCHIQ_SLOT_SIZE) {
9253 + vchiq_log_error(vchiq_core_log_level,
9254 + "fsi - pos %x: header %x, "
9255 + "msgid %x, header->msgid %x, "
9256 + "header->size %x",
9257 + pos, (unsigned int)header,
9258 + msgid, header->msgid,
9260 + WARN(1, "invalid slot position\n");
9268 +do_abort_bulks(VCHIQ_SERVICE_T *service)
9270 + VCHIQ_STATUS_T status;
9272 + /* Abort any outstanding bulk transfers */
9273 + if (mutex_lock_interruptible(&service->bulk_mutex) != 0)
9275 + abort_outstanding_bulks(service, &service->bulk_tx);
9276 + abort_outstanding_bulks(service, &service->bulk_rx);
9277 + mutex_unlock(&service->bulk_mutex);
9279 + status = notify_bulks(service, &service->bulk_tx, 0/*!retry_poll*/);
9280 + if (status == VCHIQ_SUCCESS)
9281 + status = notify_bulks(service, &service->bulk_rx,
9282 + 0/*!retry_poll*/);
9283 + return (status == VCHIQ_SUCCESS);
9286 +static VCHIQ_STATUS_T
9287 +close_service_complete(VCHIQ_SERVICE_T *service, int failstate)
9289 + VCHIQ_STATUS_T status;
9290 + int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
9293 + switch (service->srvstate) {
9294 + case VCHIQ_SRVSTATE_OPEN:
9295 + case VCHIQ_SRVSTATE_CLOSESENT:
9296 + case VCHIQ_SRVSTATE_CLOSERECVD:
9298 + if (service->auto_close) {
9299 + service->client_id = 0;
9300 + service->remoteport = VCHIQ_PORT_FREE;
9301 + newstate = VCHIQ_SRVSTATE_LISTENING;
9303 + newstate = VCHIQ_SRVSTATE_CLOSEWAIT;
9305 + newstate = VCHIQ_SRVSTATE_CLOSED;
9306 + vchiq_set_service_state(service, newstate);
9308 + case VCHIQ_SRVSTATE_LISTENING:
9311 + vchiq_log_error(vchiq_core_log_level,
9312 + "close_service_complete(%x) called in state %s",
9313 + service->handle, srvstate_names[service->srvstate]);
9314 + WARN(1, "close_service_complete in unexpected state\n");
9315 + return VCHIQ_ERROR;
9318 + status = make_service_callback(service,
9319 + VCHIQ_SERVICE_CLOSED, NULL, NULL);
9321 + if (status != VCHIQ_RETRY) {
9322 + int uc = service->service_use_count;
9324 + /* Complete the close process */
9325 + for (i = 0; i < uc; i++)
9326 + /* cater for cases where close is forced and the
9327 + ** client may not close all it's handles */
9328 + vchiq_release_service_internal(service);
9330 + service->client_id = 0;
9331 + service->remoteport = VCHIQ_PORT_FREE;
9333 + if (service->srvstate == VCHIQ_SRVSTATE_CLOSED)
9334 + vchiq_free_service_internal(service);
9335 + else if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT) {
9337 + service->closing = 0;
9339 + up(&service->remove_event);
9342 + vchiq_set_service_state(service, failstate);
9347 +/* Called by the slot handler */
9349 +vchiq_close_service_internal(VCHIQ_SERVICE_T *service, int close_recvd)
9351 + VCHIQ_STATE_T *state = service->state;
9352 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
9353 + int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
9355 + vchiq_log_info(vchiq_core_log_level, "%d: csi:%d,%d (%s)",
9356 + service->state->id, service->localport, close_recvd,
9357 + srvstate_names[service->srvstate]);
9359 + switch (service->srvstate) {
9360 + case VCHIQ_SRVSTATE_CLOSED:
9361 + case VCHIQ_SRVSTATE_HIDDEN:
9362 + case VCHIQ_SRVSTATE_LISTENING:
9363 + case VCHIQ_SRVSTATE_CLOSEWAIT:
9365 + vchiq_log_error(vchiq_core_log_level,
9366 + "vchiq_close_service_internal(1) called "
9368 + srvstate_names[service->srvstate]);
9369 + else if (is_server) {
9370 + if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
9371 + status = VCHIQ_ERROR;
9373 + service->client_id = 0;
9374 + service->remoteport = VCHIQ_PORT_FREE;
9375 + if (service->srvstate ==
9376 + VCHIQ_SRVSTATE_CLOSEWAIT)
9377 + vchiq_set_service_state(service,
9378 + VCHIQ_SRVSTATE_LISTENING);
9380 + up(&service->remove_event);
9382 + vchiq_free_service_internal(service);
9384 + case VCHIQ_SRVSTATE_OPENING:
9385 + if (close_recvd) {
9386 + /* The open was rejected - tell the user */
9387 + vchiq_set_service_state(service,
9388 + VCHIQ_SRVSTATE_CLOSEWAIT);
9389 + up(&service->remove_event);
9391 + /* Shutdown mid-open - let the other side know */
9392 + status = queue_message(state, service,
9395 + service->localport,
9396 + VCHIQ_MSG_DSTPORT(service->remoteport)),
9401 + case VCHIQ_SRVSTATE_OPENSYNC:
9402 + mutex_lock(&state->sync_mutex);
9403 + /* Drop through */
9405 + case VCHIQ_SRVSTATE_OPEN:
9406 + if (state->is_master || close_recvd) {
9407 + if (!do_abort_bulks(service))
9408 + status = VCHIQ_RETRY;
9411 + release_service_messages(service);
9413 + if (status == VCHIQ_SUCCESS)
9414 + status = queue_message(state, service,
9417 + service->localport,
9418 + VCHIQ_MSG_DSTPORT(service->remoteport)),
9421 + if (status == VCHIQ_SUCCESS) {
9424 + } else if (service->srvstate == VCHIQ_SRVSTATE_OPENSYNC) {
9425 + mutex_unlock(&state->sync_mutex);
9430 + status = close_service_complete(service,
9431 + VCHIQ_SRVSTATE_CLOSERECVD);
9434 + case VCHIQ_SRVSTATE_CLOSESENT:
9436 + /* This happens when a process is killed mid-close */
9439 + if (!state->is_master) {
9440 + if (!do_abort_bulks(service)) {
9441 + status = VCHIQ_RETRY;
9446 + if (status == VCHIQ_SUCCESS)
9447 + status = close_service_complete(service,
9448 + VCHIQ_SRVSTATE_CLOSERECVD);
9451 + case VCHIQ_SRVSTATE_CLOSERECVD:
9452 + if (!close_recvd && is_server)
9453 + /* Force into LISTENING mode */
9454 + vchiq_set_service_state(service,
9455 + VCHIQ_SRVSTATE_LISTENING);
9456 + status = close_service_complete(service,
9457 + VCHIQ_SRVSTATE_CLOSERECVD);
9461 + vchiq_log_error(vchiq_core_log_level,
9462 + "vchiq_close_service_internal(%d) called in state %s",
9463 + close_recvd, srvstate_names[service->srvstate]);
9470 +/* Called from the application process upon process death */
9472 +vchiq_terminate_service_internal(VCHIQ_SERVICE_T *service)
9474 + VCHIQ_STATE_T *state = service->state;
9476 + vchiq_log_info(vchiq_core_log_level, "%d: tsi - (%d<->%d)",
9477 + state->id, service->localport, service->remoteport);
9479 + mark_service_closing(service);
9481 + /* Mark the service for removal by the slot handler */
9482 + request_poll(state, service, VCHIQ_POLL_REMOVE);
9485 +/* Called from the slot handler */
9487 +vchiq_free_service_internal(VCHIQ_SERVICE_T *service)
9489 + VCHIQ_STATE_T *state = service->state;
9491 + vchiq_log_info(vchiq_core_log_level, "%d: fsi - (%d)",
9492 + state->id, service->localport);
9494 + switch (service->srvstate) {
9495 + case VCHIQ_SRVSTATE_OPENING:
9496 + case VCHIQ_SRVSTATE_CLOSED:
9497 + case VCHIQ_SRVSTATE_HIDDEN:
9498 + case VCHIQ_SRVSTATE_LISTENING:
9499 + case VCHIQ_SRVSTATE_CLOSEWAIT:
9502 + vchiq_log_error(vchiq_core_log_level,
9503 + "%d: fsi - (%d) in state %s",
9504 + state->id, service->localport,
9505 + srvstate_names[service->srvstate]);
9509 + vchiq_set_service_state(service, VCHIQ_SRVSTATE_FREE);
9511 + up(&service->remove_event);
9513 + /* Release the initial lock */
9514 + unlock_service(service);
9518 +vchiq_connect_internal(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance)
9520 + VCHIQ_SERVICE_T *service;
9523 + /* Find all services registered to this client and enable them. */
9525 + while ((service = next_service_by_instance(state, instance,
9527 + if (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)
9528 + vchiq_set_service_state(service,
9529 + VCHIQ_SRVSTATE_LISTENING);
9530 + unlock_service(service);
9533 + if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED) {
9534 + if (queue_message(state, NULL,
9535 + VCHIQ_MAKE_MSG(VCHIQ_MSG_CONNECT, 0, 0), NULL, 0,
9536 + 0, 1) == VCHIQ_RETRY)
9537 + return VCHIQ_RETRY;
9539 + vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTING);
9542 + if (state->conn_state == VCHIQ_CONNSTATE_CONNECTING) {
9543 + if (down_interruptible(&state->connect) != 0)
9544 + return VCHIQ_RETRY;
9546 + vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
9547 + up(&state->connect);
9550 + return VCHIQ_SUCCESS;
9554 +vchiq_shutdown_internal(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance)
9556 + VCHIQ_SERVICE_T *service;
9559 + /* Find all services registered to this client and enable them. */
9561 + while ((service = next_service_by_instance(state, instance,
9563 + (void)vchiq_remove_service(service->handle);
9564 + unlock_service(service);
9567 + return VCHIQ_SUCCESS;
9571 +vchiq_pause_internal(VCHIQ_STATE_T *state)
9573 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
9575 + switch (state->conn_state) {
9576 + case VCHIQ_CONNSTATE_CONNECTED:
9577 + /* Request a pause */
9578 + vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSING);
9579 + request_poll(state, NULL, 0);
9582 + vchiq_log_error(vchiq_core_log_level,
9583 + "vchiq_pause_internal in state %s\n",
9584 + conn_state_names[state->conn_state]);
9585 + status = VCHIQ_ERROR;
9586 + VCHIQ_STATS_INC(state, error_count);
9594 +vchiq_resume_internal(VCHIQ_STATE_T *state)
9596 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
9598 + if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) {
9599 + vchiq_set_conn_state(state, VCHIQ_CONNSTATE_RESUMING);
9600 + request_poll(state, NULL, 0);
9602 + status = VCHIQ_ERROR;
9603 + VCHIQ_STATS_INC(state, error_count);
9610 +vchiq_close_service(VCHIQ_SERVICE_HANDLE_T handle)
9612 + /* Unregister the service */
9613 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
9614 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
9617 + return VCHIQ_ERROR;
9619 + vchiq_log_info(vchiq_core_log_level,
9620 + "%d: close_service:%d",
9621 + service->state->id, service->localport);
9623 + if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
9624 + (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
9625 + (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)) {
9626 + unlock_service(service);
9627 + return VCHIQ_ERROR;
9630 + mark_service_closing(service);
9632 + if (current == service->state->slot_handler_thread) {
9633 + status = vchiq_close_service_internal(service,
9634 + 0/*!close_recvd*/);
9635 + BUG_ON(status == VCHIQ_RETRY);
9637 + /* Mark the service for termination by the slot handler */
9638 + request_poll(service->state, service, VCHIQ_POLL_TERMINATE);
9642 + if (down_interruptible(&service->remove_event) != 0) {
9643 + status = VCHIQ_RETRY;
9647 + if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
9648 + (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
9649 + (service->srvstate == VCHIQ_SRVSTATE_OPEN))
9652 + vchiq_log_warning(vchiq_core_log_level,
9653 + "%d: close_service:%d - waiting in state %s",
9654 + service->state->id, service->localport,
9655 + srvstate_names[service->srvstate]);
9658 + if ((status == VCHIQ_SUCCESS) &&
9659 + (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
9660 + (service->srvstate != VCHIQ_SRVSTATE_LISTENING))
9661 + status = VCHIQ_ERROR;
9663 + unlock_service(service);
9669 +vchiq_remove_service(VCHIQ_SERVICE_HANDLE_T handle)
9671 + /* Unregister the service */
9672 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
9673 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
9676 + return VCHIQ_ERROR;
9678 + vchiq_log_info(vchiq_core_log_level,
9679 + "%d: remove_service:%d",
9680 + service->state->id, service->localport);
9682 + if (service->srvstate == VCHIQ_SRVSTATE_FREE) {
9683 + unlock_service(service);
9684 + return VCHIQ_ERROR;
9687 + mark_service_closing(service);
9689 + if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
9690 + (current == service->state->slot_handler_thread)) {
9691 + /* Make it look like a client, because it must be removed and
9692 + not left in the LISTENING state. */
9693 + service->public_fourcc = VCHIQ_FOURCC_INVALID;
9695 + status = vchiq_close_service_internal(service,
9696 + 0/*!close_recvd*/);
9697 + BUG_ON(status == VCHIQ_RETRY);
9699 + /* Mark the service for removal by the slot handler */
9700 + request_poll(service->state, service, VCHIQ_POLL_REMOVE);
9703 + if (down_interruptible(&service->remove_event) != 0) {
9704 + status = VCHIQ_RETRY;
9708 + if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
9709 + (service->srvstate == VCHIQ_SRVSTATE_OPEN))
9712 + vchiq_log_warning(vchiq_core_log_level,
9713 + "%d: remove_service:%d - waiting in state %s",
9714 + service->state->id, service->localport,
9715 + srvstate_names[service->srvstate]);
9718 + if ((status == VCHIQ_SUCCESS) &&
9719 + (service->srvstate != VCHIQ_SRVSTATE_FREE))
9720 + status = VCHIQ_ERROR;
9722 + unlock_service(service);
9728 +/* This function may be called by kernel threads or user threads.
9729 + * User threads may receive VCHIQ_RETRY to indicate that a signal has been
9730 + * received and the call should be retried after being returned to user
9732 + * When called in blocking mode, the userdata field points to a bulk_waiter
9736 +vchiq_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle,
9737 + VCHI_MEM_HANDLE_T memhandle, void *offset, int size, void *userdata,
9738 + VCHIQ_BULK_MODE_T mode, VCHIQ_BULK_DIR_T dir)
9740 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
9741 + VCHIQ_BULK_QUEUE_T *queue;
9742 + VCHIQ_BULK_T *bulk;
9743 + VCHIQ_STATE_T *state;
9744 + struct bulk_waiter *bulk_waiter = NULL;
9745 + const char dir_char = (dir == VCHIQ_BULK_TRANSMIT) ? 't' : 'r';
9746 + const int dir_msgtype = (dir == VCHIQ_BULK_TRANSMIT) ?
9747 + VCHIQ_MSG_BULK_TX : VCHIQ_MSG_BULK_RX;
9748 + VCHIQ_STATUS_T status = VCHIQ_ERROR;
9751 + (service->srvstate != VCHIQ_SRVSTATE_OPEN) ||
9752 + ((memhandle == VCHI_MEM_HANDLE_INVALID) && (offset == NULL)) ||
9753 + (vchiq_check_service(service) != VCHIQ_SUCCESS))
9757 + case VCHIQ_BULK_MODE_NOCALLBACK:
9758 + case VCHIQ_BULK_MODE_CALLBACK:
9760 + case VCHIQ_BULK_MODE_BLOCKING:
9761 + bulk_waiter = (struct bulk_waiter *)userdata;
9762 + sema_init(&bulk_waiter->event, 0);
9763 + bulk_waiter->actual = 0;
9764 + bulk_waiter->bulk = NULL;
9766 + case VCHIQ_BULK_MODE_WAITING:
9767 + bulk_waiter = (struct bulk_waiter *)userdata;
9768 + bulk = bulk_waiter->bulk;
9774 + state = service->state;
9776 + queue = (dir == VCHIQ_BULK_TRANSMIT) ?
9777 + &service->bulk_tx : &service->bulk_rx;
9779 + if (mutex_lock_interruptible(&service->bulk_mutex) != 0) {
9780 + status = VCHIQ_RETRY;
9784 + if (queue->local_insert == queue->remove + VCHIQ_NUM_SERVICE_BULKS) {
9785 + VCHIQ_SERVICE_STATS_INC(service, bulk_stalls);
9787 + mutex_unlock(&service->bulk_mutex);
9788 + if (down_interruptible(&service->bulk_remove_event)
9790 + status = VCHIQ_RETRY;
9793 + if (mutex_lock_interruptible(&service->bulk_mutex)
9795 + status = VCHIQ_RETRY;
9798 + } while (queue->local_insert == queue->remove +
9799 + VCHIQ_NUM_SERVICE_BULKS);
9802 + bulk = &queue->bulks[BULK_INDEX(queue->local_insert)];
9804 + bulk->mode = mode;
9806 + bulk->userdata = userdata;
9807 + bulk->size = size;
9808 + bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
9810 + if (vchiq_prepare_bulk_data(bulk, memhandle, offset, size, dir) !=
9812 + goto unlock_error_exit;
9816 + vchiq_log_info(vchiq_core_log_level,
9817 + "%d: bt (%d->%d) %cx %x@%x %x",
9819 + service->localport, service->remoteport, dir_char,
9820 + size, (unsigned int)bulk->data, (unsigned int)userdata);
9822 + if (state->is_master) {
9823 + queue->local_insert++;
9824 + if (resolve_bulks(service, queue))
9825 + request_poll(state, service,
9826 + (dir == VCHIQ_BULK_TRANSMIT) ?
9827 + VCHIQ_POLL_TXNOTIFY : VCHIQ_POLL_RXNOTIFY);
9829 + int payload[2] = { (int)bulk->data, bulk->size };
9830 + VCHIQ_ELEMENT_T element = { payload, sizeof(payload) };
9832 + status = queue_message(state, NULL,
9833 + VCHIQ_MAKE_MSG(dir_msgtype,
9834 + service->localport, service->remoteport),
9835 + &element, 1, sizeof(payload), 1);
9836 + if (status != VCHIQ_SUCCESS) {
9837 + vchiq_complete_bulk(bulk);
9838 + goto unlock_error_exit;
9840 + queue->local_insert++;
9843 + mutex_unlock(&service->bulk_mutex);
9845 + vchiq_log_trace(vchiq_core_log_level,
9846 + "%d: bt:%d %cx li=%x ri=%x p=%x",
9848 + service->localport, dir_char,
9849 + queue->local_insert, queue->remote_insert, queue->process);
9852 + unlock_service(service);
9854 + status = VCHIQ_SUCCESS;
9856 + if (bulk_waiter) {
9857 + bulk_waiter->bulk = bulk;
9858 + if (down_interruptible(&bulk_waiter->event) != 0)
9859 + status = VCHIQ_RETRY;
9860 + else if (bulk_waiter->actual == VCHIQ_BULK_ACTUAL_ABORTED)
9861 + status = VCHIQ_ERROR;
9867 + mutex_unlock(&service->bulk_mutex);
9871 + unlock_service(service);
9876 +vchiq_queue_message(VCHIQ_SERVICE_HANDLE_T handle,
9877 + const VCHIQ_ELEMENT_T *elements, unsigned int count)
9879 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
9880 + VCHIQ_STATUS_T status = VCHIQ_ERROR;
9882 + unsigned int size = 0;
9886 + (vchiq_check_service(service) != VCHIQ_SUCCESS))
9889 + for (i = 0; i < (unsigned int)count; i++) {
9890 + if (elements[i].size) {
9891 + if (elements[i].data == NULL) {
9892 + VCHIQ_SERVICE_STATS_INC(service, error_count);
9895 + size += elements[i].size;
9899 + if (size > VCHIQ_MAX_MSG_SIZE) {
9900 + VCHIQ_SERVICE_STATS_INC(service, error_count);
9904 + switch (service->srvstate) {
9905 + case VCHIQ_SRVSTATE_OPEN:
9906 + status = queue_message(service->state, service,
9907 + VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA,
9908 + service->localport,
9909 + service->remoteport),
9910 + elements, count, size, 1);
9912 + case VCHIQ_SRVSTATE_OPENSYNC:
9913 + status = queue_message_sync(service->state, service,
9914 + VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA,
9915 + service->localport,
9916 + service->remoteport),
9917 + elements, count, size, 1);
9920 + status = VCHIQ_ERROR;
9926 + unlock_service(service);
9932 +vchiq_release_message(VCHIQ_SERVICE_HANDLE_T handle, VCHIQ_HEADER_T *header)
9934 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
9935 + VCHIQ_SHARED_STATE_T *remote;
9936 + VCHIQ_STATE_T *state;
9942 + state = service->state;
9943 + remote = state->remote;
9945 + slot_index = SLOT_INDEX_FROM_DATA(state, (void *)header);
9947 + if ((slot_index >= remote->slot_first) &&
9948 + (slot_index <= remote->slot_last)) {
9949 + int msgid = header->msgid;
9950 + if (msgid & VCHIQ_MSGID_CLAIMED) {
9951 + VCHIQ_SLOT_INFO_T *slot_info =
9952 + SLOT_INFO_FROM_INDEX(state, slot_index);
9954 + release_slot(state, slot_info, header, service);
9956 + } else if (slot_index == remote->slot_sync)
9957 + release_message_sync(state, header);
9959 + unlock_service(service);
9963 +release_message_sync(VCHIQ_STATE_T *state, VCHIQ_HEADER_T *header)
9965 + header->msgid = VCHIQ_MSGID_PADDING;
9967 + remote_event_signal(&state->remote->sync_release);
9971 +vchiq_get_peer_version(VCHIQ_SERVICE_HANDLE_T handle, short *peer_version)
9973 + VCHIQ_STATUS_T status = VCHIQ_ERROR;
9974 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
9977 + (vchiq_check_service(service) != VCHIQ_SUCCESS) ||
9980 + *peer_version = service->peer_version;
9981 + status = VCHIQ_SUCCESS;
9985 + unlock_service(service);
9990 +vchiq_get_config(VCHIQ_INSTANCE_T instance,
9991 + int config_size, VCHIQ_CONFIG_T *pconfig)
9993 + VCHIQ_CONFIG_T config;
9997 + config.max_msg_size = VCHIQ_MAX_MSG_SIZE;
9998 + config.bulk_threshold = VCHIQ_MAX_MSG_SIZE;
9999 + config.max_outstanding_bulks = VCHIQ_NUM_SERVICE_BULKS;
10000 + config.max_services = VCHIQ_MAX_SERVICES;
10001 + config.version = VCHIQ_VERSION;
10002 + config.version_min = VCHIQ_VERSION_MIN;
10004 + if (config_size > sizeof(VCHIQ_CONFIG_T))
10005 + return VCHIQ_ERROR;
10007 + memcpy(pconfig, &config,
10008 + min(config_size, (int)(sizeof(VCHIQ_CONFIG_T))));
10010 + return VCHIQ_SUCCESS;
10014 +vchiq_set_service_option(VCHIQ_SERVICE_HANDLE_T handle,
10015 + VCHIQ_SERVICE_OPTION_T option, int value)
10017 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
10018 + VCHIQ_STATUS_T status = VCHIQ_ERROR;
10021 + switch (option) {
10022 + case VCHIQ_SERVICE_OPTION_AUTOCLOSE:
10023 + service->auto_close = value;
10024 + status = VCHIQ_SUCCESS;
10027 + case VCHIQ_SERVICE_OPTION_SLOT_QUOTA: {
10028 + VCHIQ_SERVICE_QUOTA_T *service_quota =
10029 + &service->state->service_quotas[
10030 + service->localport];
10032 + value = service->state->default_slot_quota;
10033 + if ((value >= service_quota->slot_use_count) &&
10034 + (value < (unsigned short)~0)) {
10035 + service_quota->slot_quota = value;
10036 + if ((value >= service_quota->slot_use_count) &&
10037 + (service_quota->message_quota >=
10038 + service_quota->message_use_count)) {
10039 + /* Signal the service that it may have
10040 + ** dropped below its quota */
10041 + up(&service_quota->quota_event);
10043 + status = VCHIQ_SUCCESS;
10047 + case VCHIQ_SERVICE_OPTION_MESSAGE_QUOTA: {
10048 + VCHIQ_SERVICE_QUOTA_T *service_quota =
10049 + &service->state->service_quotas[
10050 + service->localport];
10052 + value = service->state->default_message_quota;
10053 + if ((value >= service_quota->message_use_count) &&
10054 + (value < (unsigned short)~0)) {
10055 + service_quota->message_quota = value;
10057 + service_quota->message_use_count) &&
10058 + (service_quota->slot_quota >=
10059 + service_quota->slot_use_count))
10060 + /* Signal the service that it may have
10061 + ** dropped below its quota */
10062 + up(&service_quota->quota_event);
10063 + status = VCHIQ_SUCCESS;
10067 + case VCHIQ_SERVICE_OPTION_SYNCHRONOUS:
10068 + if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
10069 + (service->srvstate ==
10070 + VCHIQ_SRVSTATE_LISTENING)) {
10071 + service->sync = value;
10072 + status = VCHIQ_SUCCESS;
10079 + unlock_service(service);
10086 +vchiq_dump_shared_state(void *dump_context, VCHIQ_STATE_T *state,
10087 + VCHIQ_SHARED_STATE_T *shared, const char *label)
10089 + static const char *const debug_names[] = {
10091 + "SLOT_HANDLER_COUNT",
10092 + "SLOT_HANDLER_LINE",
10096 + "AWAIT_COMPLETION_LINE",
10097 + "DEQUEUE_MESSAGE_LINE",
10098 + "SERVICE_CALLBACK_LINE",
10099 + "MSG_QUEUE_FULL_COUNT",
10100 + "COMPLETION_QUEUE_FULL_COUNT"
10106 + len = snprintf(buf, sizeof(buf),
10107 + " %s: slots %d-%d tx_pos=%x recycle=%x",
10108 + label, shared->slot_first, shared->slot_last,
10109 + shared->tx_pos, shared->slot_queue_recycle);
10110 + vchiq_dump(dump_context, buf, len + 1);
10112 + len = snprintf(buf, sizeof(buf),
10113 + " Slots claimed:");
10114 + vchiq_dump(dump_context, buf, len + 1);
10116 + for (i = shared->slot_first; i <= shared->slot_last; i++) {
10117 + VCHIQ_SLOT_INFO_T slot_info = *SLOT_INFO_FROM_INDEX(state, i);
10118 + if (slot_info.use_count != slot_info.release_count) {
10119 + len = snprintf(buf, sizeof(buf),
10120 + " %d: %d/%d", i, slot_info.use_count,
10121 + slot_info.release_count);
10122 + vchiq_dump(dump_context, buf, len + 1);
10126 + for (i = 1; i < shared->debug[DEBUG_ENTRIES]; i++) {
10127 + len = snprintf(buf, sizeof(buf), " DEBUG: %s = %d(%x)",
10128 + debug_names[i], shared->debug[i], shared->debug[i]);
10129 + vchiq_dump(dump_context, buf, len + 1);
10134 +vchiq_dump_state(void *dump_context, VCHIQ_STATE_T *state)
10140 + len = snprintf(buf, sizeof(buf), "State %d: %s", state->id,
10141 + conn_state_names[state->conn_state]);
10142 + vchiq_dump(dump_context, buf, len + 1);
10144 + len = snprintf(buf, sizeof(buf),
10145 + " tx_pos=%x(@%x), rx_pos=%x(@%x)",
10146 + state->local->tx_pos,
10147 + (uint32_t)state->tx_data +
10148 + (state->local_tx_pos & VCHIQ_SLOT_MASK),
10150 + (uint32_t)state->rx_data +
10151 + (state->rx_pos & VCHIQ_SLOT_MASK));
10152 + vchiq_dump(dump_context, buf, len + 1);
10154 + len = snprintf(buf, sizeof(buf),
10155 + " Version: %d (min %d)",
10156 + VCHIQ_VERSION, VCHIQ_VERSION_MIN);
10157 + vchiq_dump(dump_context, buf, len + 1);
10159 + if (VCHIQ_ENABLE_STATS) {
10160 + len = snprintf(buf, sizeof(buf),
10161 + " Stats: ctrl_tx_count=%d, ctrl_rx_count=%d, "
10162 + "error_count=%d",
10163 + state->stats.ctrl_tx_count, state->stats.ctrl_rx_count,
10164 + state->stats.error_count);
10165 + vchiq_dump(dump_context, buf, len + 1);
10168 + len = snprintf(buf, sizeof(buf),
10169 + " Slots: %d available (%d data), %d recyclable, %d stalls "
10171 + ((state->slot_queue_available * VCHIQ_SLOT_SIZE) -
10172 + state->local_tx_pos) / VCHIQ_SLOT_SIZE,
10173 + state->data_quota - state->data_use_count,
10174 + state->local->slot_queue_recycle - state->slot_queue_available,
10175 + state->stats.slot_stalls, state->stats.data_stalls);
10176 + vchiq_dump(dump_context, buf, len + 1);
10178 + vchiq_dump_platform_state(dump_context);
10180 + vchiq_dump_shared_state(dump_context, state, state->local, "Local");
10181 + vchiq_dump_shared_state(dump_context, state, state->remote, "Remote");
10183 + vchiq_dump_platform_instances(dump_context);
10185 + for (i = 0; i < state->unused_service; i++) {
10186 + VCHIQ_SERVICE_T *service = find_service_by_port(state, i);
10189 + vchiq_dump_service_state(dump_context, service);
10190 + unlock_service(service);
10196 +vchiq_dump_service_state(void *dump_context, VCHIQ_SERVICE_T *service)
10201 + len = snprintf(buf, sizeof(buf), "Service %d: %s (ref %u)",
10202 + service->localport, srvstate_names[service->srvstate],
10203 + service->ref_count - 1); /*Don't include the lock just taken*/
10205 + if (service->srvstate != VCHIQ_SRVSTATE_FREE) {
10206 + char remoteport[30];
10207 + VCHIQ_SERVICE_QUOTA_T *service_quota =
10208 + &service->state->service_quotas[service->localport];
10209 + int fourcc = service->base.fourcc;
10210 + int tx_pending, rx_pending;
10211 + if (service->remoteport != VCHIQ_PORT_FREE) {
10212 + int len2 = snprintf(remoteport, sizeof(remoteport),
10213 + "%d", service->remoteport);
10214 + if (service->public_fourcc != VCHIQ_FOURCC_INVALID)
10215 + snprintf(remoteport + len2,
10216 + sizeof(remoteport) - len2,
10217 + " (client %x)", service->client_id);
10219 + strcpy(remoteport, "n/a");
10221 + len += snprintf(buf + len, sizeof(buf) - len,
10222 + " '%c%c%c%c' remote %s (msg use %d/%d, slot use %d/%d)",
10223 + VCHIQ_FOURCC_AS_4CHARS(fourcc),
10225 + service_quota->message_use_count,
10226 + service_quota->message_quota,
10227 + service_quota->slot_use_count,
10228 + service_quota->slot_quota);
10230 + vchiq_dump(dump_context, buf, len + 1);
10232 + tx_pending = service->bulk_tx.local_insert -
10233 + service->bulk_tx.remote_insert;
10235 + rx_pending = service->bulk_rx.local_insert -
10236 + service->bulk_rx.remote_insert;
10238 + len = snprintf(buf, sizeof(buf),
10239 + " Bulk: tx_pending=%d (size %d),"
10240 + " rx_pending=%d (size %d)",
10242 + tx_pending ? service->bulk_tx.bulks[
10243 + BULK_INDEX(service->bulk_tx.remove)].size : 0,
10245 + rx_pending ? service->bulk_rx.bulks[
10246 + BULK_INDEX(service->bulk_rx.remove)].size : 0);
10248 + if (VCHIQ_ENABLE_STATS) {
10249 + vchiq_dump(dump_context, buf, len + 1);
10251 + len = snprintf(buf, sizeof(buf),
10252 + " Ctrl: tx_count=%d, tx_bytes=%llu, "
10253 + "rx_count=%d, rx_bytes=%llu",
10254 + service->stats.ctrl_tx_count,
10255 + service->stats.ctrl_tx_bytes,
10256 + service->stats.ctrl_rx_count,
10257 + service->stats.ctrl_rx_bytes);
10258 + vchiq_dump(dump_context, buf, len + 1);
10260 + len = snprintf(buf, sizeof(buf),
10261 + " Bulk: tx_count=%d, tx_bytes=%llu, "
10262 + "rx_count=%d, rx_bytes=%llu",
10263 + service->stats.bulk_tx_count,
10264 + service->stats.bulk_tx_bytes,
10265 + service->stats.bulk_rx_count,
10266 + service->stats.bulk_rx_bytes);
10267 + vchiq_dump(dump_context, buf, len + 1);
10269 + len = snprintf(buf, sizeof(buf),
10270 + " %d quota stalls, %d slot stalls, "
10271 + "%d bulk stalls, %d aborted, %d errors",
10272 + service->stats.quota_stalls,
10273 + service->stats.slot_stalls,
10274 + service->stats.bulk_stalls,
10275 + service->stats.bulk_aborted_count,
10276 + service->stats.error_count);
10280 + vchiq_dump(dump_context, buf, len + 1);
10282 + if (service->srvstate != VCHIQ_SRVSTATE_FREE)
10283 + vchiq_dump_platform_service_state(dump_context, service);
10288 +vchiq_loud_error_header(void)
10290 + vchiq_log_error(vchiq_core_log_level,
10291 + "============================================================"
10292 + "================");
10293 + vchiq_log_error(vchiq_core_log_level,
10294 + "============================================================"
10295 + "================");
10296 + vchiq_log_error(vchiq_core_log_level, "=====");
10300 +vchiq_loud_error_footer(void)
10302 + vchiq_log_error(vchiq_core_log_level, "=====");
10303 + vchiq_log_error(vchiq_core_log_level,
10304 + "============================================================"
10305 + "================");
10306 + vchiq_log_error(vchiq_core_log_level,
10307 + "============================================================"
10308 + "================");
10312 +VCHIQ_STATUS_T vchiq_send_remote_use(VCHIQ_STATE_T *state)
10314 + VCHIQ_STATUS_T status = VCHIQ_RETRY;
10315 + if (state->conn_state != VCHIQ_CONNSTATE_DISCONNECTED)
10316 + status = queue_message(state, NULL,
10317 + VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_USE, 0, 0),
10322 +VCHIQ_STATUS_T vchiq_send_remote_release(VCHIQ_STATE_T *state)
10324 + VCHIQ_STATUS_T status = VCHIQ_RETRY;
10325 + if (state->conn_state != VCHIQ_CONNSTATE_DISCONNECTED)
10326 + status = queue_message(state, NULL,
10327 + VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_RELEASE, 0, 0),
10332 +VCHIQ_STATUS_T vchiq_send_remote_use_active(VCHIQ_STATE_T *state)
10334 + VCHIQ_STATUS_T status = VCHIQ_RETRY;
10335 + if (state->conn_state != VCHIQ_CONNSTATE_DISCONNECTED)
10336 + status = queue_message(state, NULL,
10337 + VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_USE_ACTIVE, 0, 0),
10342 +void vchiq_log_dump_mem(const char *label, uint32_t addr, const void *voidMem,
10345 + const uint8_t *mem = (const uint8_t *)voidMem;
10347 + char lineBuf[100];
10350 + while (numBytes > 0) {
10353 + for (offset = 0; offset < 16; offset++) {
10354 + if (offset < numBytes)
10355 + s += snprintf(s, 4, "%02x ", mem[offset]);
10357 + s += snprintf(s, 4, " ");
10360 + for (offset = 0; offset < 16; offset++) {
10361 + if (offset < numBytes) {
10362 + uint8_t ch = mem[offset];
10364 + if ((ch < ' ') || (ch > '~'))
10371 + if ((label != NULL) && (*label != '\0'))
10372 + vchiq_log_trace(VCHIQ_LOG_TRACE,
10373 + "%s: %08x: %s", label, addr, lineBuf);
10375 + vchiq_log_trace(VCHIQ_LOG_TRACE,
10376 + "%08x: %s", addr, lineBuf);
10380 + if (numBytes > 16)
10386 diff -urN linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.h linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.h
10387 --- linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.h 1970-01-01 01:00:00.000000000 +0100
10388 +++ linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.h 2013-07-06 15:25:50.000000000 +0100
10391 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
10393 + * Redistribution and use in source and binary forms, with or without
10394 + * modification, are permitted provided that the following conditions
10396 + * 1. Redistributions of source code must retain the above copyright
10397 + * notice, this list of conditions, and the following disclaimer,
10398 + * without modification.
10399 + * 2. Redistributions in binary form must reproduce the above copyright
10400 + * notice, this list of conditions and the following disclaimer in the
10401 + * documentation and/or other materials provided with the distribution.
10402 + * 3. The names of the above-listed copyright holders may not be used
10403 + * to endorse or promote products derived from this software without
10404 + * specific prior written permission.
10406 + * ALTERNATIVELY, this software may be distributed under the terms of the
10407 + * GNU General Public License ("GPL") version 2, as published by the Free
10408 + * Software Foundation.
10410 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
10411 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
10412 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
10413 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
10414 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
10415 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
10416 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
10417 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
10418 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
10419 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
10420 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
10423 +#ifndef VCHIQ_CORE_H
10424 +#define VCHIQ_CORE_H
10426 +#include <linux/mutex.h>
10427 +#include <linux/semaphore.h>
10428 +#include <linux/kthread.h>
10430 +#include "vchiq_cfg.h"
10432 +#include "vchiq.h"
10434 +/* Run time control of log level, based on KERN_XXX level. */
10435 +#define VCHIQ_LOG_DEFAULT 4
10436 +#define VCHIQ_LOG_ERROR 3
10437 +#define VCHIQ_LOG_WARNING 4
10438 +#define VCHIQ_LOG_INFO 6
10439 +#define VCHIQ_LOG_TRACE 7
10441 +#define VCHIQ_LOG_PREFIX KERN_INFO "vchiq: "
10443 +#ifndef vchiq_log_error
10444 +#define vchiq_log_error(cat, fmt, ...) \
10445 + do { if (cat >= VCHIQ_LOG_ERROR) \
10446 + printk(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
10448 +#ifndef vchiq_log_warning
10449 +#define vchiq_log_warning(cat, fmt, ...) \
10450 + do { if (cat >= VCHIQ_LOG_WARNING) \
10451 + printk(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
10453 +#ifndef vchiq_log_info
10454 +#define vchiq_log_info(cat, fmt, ...) \
10455 + do { if (cat >= VCHIQ_LOG_INFO) \
10456 + printk(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
10458 +#ifndef vchiq_log_trace
10459 +#define vchiq_log_trace(cat, fmt, ...) \
10460 + do { if (cat >= VCHIQ_LOG_TRACE) \
10461 + printk(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
10464 +#define vchiq_loud_error(...) \
10465 + vchiq_log_error(vchiq_core_log_level, "===== " __VA_ARGS__)
10467 +#ifndef vchiq_static_assert
10468 +#define vchiq_static_assert(cond) __attribute__((unused)) \
10469 + extern int vchiq_static_assert[(cond) ? 1 : -1]
10472 +#define IS_POW2(x) (x && ((x & (x - 1)) == 0))
10474 +/* Ensure that the slot size and maximum number of slots are powers of 2 */
10475 +vchiq_static_assert(IS_POW2(VCHIQ_SLOT_SIZE));
10476 +vchiq_static_assert(IS_POW2(VCHIQ_MAX_SLOTS));
10477 +vchiq_static_assert(IS_POW2(VCHIQ_MAX_SLOTS_PER_SIDE));
10479 +#define VCHIQ_SLOT_MASK (VCHIQ_SLOT_SIZE - 1)
10480 +#define VCHIQ_SLOT_QUEUE_MASK (VCHIQ_MAX_SLOTS_PER_SIDE - 1)
10481 +#define VCHIQ_SLOT_ZERO_SLOTS ((sizeof(VCHIQ_SLOT_ZERO_T) + \
10482 + VCHIQ_SLOT_SIZE - 1) / VCHIQ_SLOT_SIZE)
10484 +#define VCHIQ_MSG_PADDING 0 /* - */
10485 +#define VCHIQ_MSG_CONNECT 1 /* - */
10486 +#define VCHIQ_MSG_OPEN 2 /* + (srcport, -), fourcc, client_id */
10487 +#define VCHIQ_MSG_OPENACK 3 /* + (srcport, dstport) */
10488 +#define VCHIQ_MSG_CLOSE 4 /* + (srcport, dstport) */
10489 +#define VCHIQ_MSG_DATA 5 /* + (srcport, dstport) */
10490 +#define VCHIQ_MSG_BULK_RX 6 /* + (srcport, dstport), data, size */
10491 +#define VCHIQ_MSG_BULK_TX 7 /* + (srcport, dstport), data, size */
10492 +#define VCHIQ_MSG_BULK_RX_DONE 8 /* + (srcport, dstport), actual */
10493 +#define VCHIQ_MSG_BULK_TX_DONE 9 /* + (srcport, dstport), actual */
10494 +#define VCHIQ_MSG_PAUSE 10 /* - */
10495 +#define VCHIQ_MSG_RESUME 11 /* - */
10496 +#define VCHIQ_MSG_REMOTE_USE 12 /* - */
10497 +#define VCHIQ_MSG_REMOTE_RELEASE 13 /* - */
10498 +#define VCHIQ_MSG_REMOTE_USE_ACTIVE 14 /* - */
10500 +#define VCHIQ_PORT_MAX (VCHIQ_MAX_SERVICES - 1)
10501 +#define VCHIQ_PORT_FREE 0x1000
10502 +#define VCHIQ_PORT_IS_VALID(port) (port < VCHIQ_PORT_FREE)
10503 +#define VCHIQ_MAKE_MSG(type, srcport, dstport) \
10504 + ((type<<24) | (srcport<<12) | (dstport<<0))
10505 +#define VCHIQ_MSG_TYPE(msgid) ((unsigned int)msgid >> 24)
10506 +#define VCHIQ_MSG_SRCPORT(msgid) \
10507 + (unsigned short)(((unsigned int)msgid >> 12) & 0xfff)
10508 +#define VCHIQ_MSG_DSTPORT(msgid) \
10509 + ((unsigned short)msgid & 0xfff)
10511 +#define VCHIQ_FOURCC_AS_4CHARS(fourcc) \
10512 + ((fourcc) >> 24) & 0xff, \
10513 + ((fourcc) >> 16) & 0xff, \
10514 + ((fourcc) >> 8) & 0xff, \
10517 +/* Ensure the fields are wide enough */
10518 +vchiq_static_assert(VCHIQ_MSG_SRCPORT(VCHIQ_MAKE_MSG(0, 0, VCHIQ_PORT_MAX))
10520 +vchiq_static_assert(VCHIQ_MSG_TYPE(VCHIQ_MAKE_MSG(0, VCHIQ_PORT_MAX, 0)) == 0);
10521 +vchiq_static_assert((unsigned int)VCHIQ_PORT_MAX <
10522 + (unsigned int)VCHIQ_PORT_FREE);
10524 +#define VCHIQ_MSGID_PADDING VCHIQ_MAKE_MSG(VCHIQ_MSG_PADDING, 0, 0)
10525 +#define VCHIQ_MSGID_CLAIMED 0x40000000
10527 +#define VCHIQ_FOURCC_INVALID 0x00000000
10528 +#define VCHIQ_FOURCC_IS_LEGAL(fourcc) (fourcc != VCHIQ_FOURCC_INVALID)
10530 +#define VCHIQ_BULK_ACTUAL_ABORTED -1
10532 +typedef uint32_t BITSET_T;
10534 +vchiq_static_assert((sizeof(BITSET_T) * 8) == 32);
10536 +#define BITSET_SIZE(b) ((b + 31) >> 5)
10537 +#define BITSET_WORD(b) (b >> 5)
10538 +#define BITSET_BIT(b) (1 << (b & 31))
10539 +#define BITSET_ZERO(bs) memset(bs, 0, sizeof(bs))
10540 +#define BITSET_IS_SET(bs, b) (bs[BITSET_WORD(b)] & BITSET_BIT(b))
10541 +#define BITSET_SET(bs, b) (bs[BITSET_WORD(b)] |= BITSET_BIT(b))
10542 +#define BITSET_CLR(bs, b) (bs[BITSET_WORD(b)] &= ~BITSET_BIT(b))
10544 +#if VCHIQ_ENABLE_STATS
10545 +#define VCHIQ_STATS_INC(state, stat) (state->stats. stat++)
10546 +#define VCHIQ_SERVICE_STATS_INC(service, stat) (service->stats. stat++)
10547 +#define VCHIQ_SERVICE_STATS_ADD(service, stat, addend) \
10548 + (service->stats. stat += addend)
10550 +#define VCHIQ_STATS_INC(state, stat) ((void)0)
10551 +#define VCHIQ_SERVICE_STATS_INC(service, stat) ((void)0)
10552 +#define VCHIQ_SERVICE_STATS_ADD(service, stat, addend) ((void)0)
10557 +#if VCHIQ_ENABLE_DEBUG
10558 + DEBUG_SLOT_HANDLER_COUNT,
10559 + DEBUG_SLOT_HANDLER_LINE,
10560 + DEBUG_PARSE_LINE,
10561 + DEBUG_PARSE_HEADER,
10562 + DEBUG_PARSE_MSGID,
10563 + DEBUG_AWAIT_COMPLETION_LINE,
10564 + DEBUG_DEQUEUE_MESSAGE_LINE,
10565 + DEBUG_SERVICE_CALLBACK_LINE,
10566 + DEBUG_MSG_QUEUE_FULL_COUNT,
10567 + DEBUG_COMPLETION_QUEUE_FULL_COUNT,
10572 +#if VCHIQ_ENABLE_DEBUG
10574 +#define DEBUG_INITIALISE(local) int *debug_ptr = (local)->debug;
10575 +#define DEBUG_TRACE(d) \
10576 + do { debug_ptr[DEBUG_ ## d] = __LINE__; dsb(); } while (0)
10577 +#define DEBUG_VALUE(d, v) \
10578 + do { debug_ptr[DEBUG_ ## d] = (v); dsb(); } while (0)
10579 +#define DEBUG_COUNT(d) \
10580 + do { debug_ptr[DEBUG_ ## d]++; dsb(); } while (0)
10582 +#else /* VCHIQ_ENABLE_DEBUG */
10584 +#define DEBUG_INITIALISE(local)
10585 +#define DEBUG_TRACE(d)
10586 +#define DEBUG_VALUE(d, v)
10587 +#define DEBUG_COUNT(d)
10589 +#endif /* VCHIQ_ENABLE_DEBUG */
10592 + VCHIQ_CONNSTATE_DISCONNECTED,
10593 + VCHIQ_CONNSTATE_CONNECTING,
10594 + VCHIQ_CONNSTATE_CONNECTED,
10595 + VCHIQ_CONNSTATE_PAUSING,
10596 + VCHIQ_CONNSTATE_PAUSE_SENT,
10597 + VCHIQ_CONNSTATE_PAUSED,
10598 + VCHIQ_CONNSTATE_RESUMING,
10599 + VCHIQ_CONNSTATE_PAUSE_TIMEOUT,
10600 + VCHIQ_CONNSTATE_RESUME_TIMEOUT
10601 +} VCHIQ_CONNSTATE_T;
10604 + VCHIQ_SRVSTATE_FREE,
10605 + VCHIQ_SRVSTATE_HIDDEN,
10606 + VCHIQ_SRVSTATE_LISTENING,
10607 + VCHIQ_SRVSTATE_OPENING,
10608 + VCHIQ_SRVSTATE_OPEN,
10609 + VCHIQ_SRVSTATE_OPENSYNC,
10610 + VCHIQ_SRVSTATE_CLOSESENT,
10611 + VCHIQ_SRVSTATE_CLOSERECVD,
10612 + VCHIQ_SRVSTATE_CLOSEWAIT,
10613 + VCHIQ_SRVSTATE_CLOSED
10617 + VCHIQ_POLL_TERMINATE,
10618 + VCHIQ_POLL_REMOVE,
10619 + VCHIQ_POLL_TXNOTIFY,
10620 + VCHIQ_POLL_RXNOTIFY,
10625 + VCHIQ_BULK_TRANSMIT,
10626 + VCHIQ_BULK_RECEIVE
10627 +} VCHIQ_BULK_DIR_T;
10629 +typedef struct vchiq_bulk_struct {
10633 + VCHI_MEM_HANDLE_T handle;
10636 + void *remote_data;
10641 +typedef struct vchiq_bulk_queue_struct {
10642 + int local_insert; /* Where to insert the next local bulk */
10643 + int remote_insert; /* Where to insert the next remote bulk (master) */
10644 + int process; /* Bulk to transfer next */
10645 + int remote_notify; /* Bulk to notify the remote client of next (mstr) */
10646 + int remove; /* Bulk to notify the local client of, and remove,
10648 + VCHIQ_BULK_T bulks[VCHIQ_NUM_SERVICE_BULKS];
10649 +} VCHIQ_BULK_QUEUE_T;
10651 +typedef struct remote_event_struct {
10654 + struct semaphore *event;
10657 +typedef struct opaque_platform_state_t *VCHIQ_PLATFORM_STATE_T;
10659 +typedef struct vchiq_state_struct VCHIQ_STATE_T;
10661 +typedef struct vchiq_slot_struct {
10662 + char data[VCHIQ_SLOT_SIZE];
10665 +typedef struct vchiq_slot_info_struct {
10666 + /* Use two counters rather than one to avoid the need for a mutex. */
10668 + short release_count;
10669 +} VCHIQ_SLOT_INFO_T;
10671 +typedef struct vchiq_service_struct {
10672 + VCHIQ_SERVICE_BASE_T base;
10673 + VCHIQ_SERVICE_HANDLE_T handle;
10674 + unsigned int ref_count;
10676 + unsigned int localport;
10677 + unsigned int remoteport;
10678 + int public_fourcc;
10683 + atomic_t poll_flags;
10685 + short version_min;
10686 + short peer_version;
10688 + VCHIQ_STATE_T *state;
10689 + VCHIQ_INSTANCE_T instance;
10691 + int service_use_count;
10693 + VCHIQ_BULK_QUEUE_T bulk_tx;
10694 + VCHIQ_BULK_QUEUE_T bulk_rx;
10696 + struct semaphore remove_event;
10697 + struct semaphore bulk_remove_event;
10698 + struct mutex bulk_mutex;
10700 + struct service_stats_struct {
10701 + int quota_stalls;
10705 + int ctrl_tx_count;
10706 + int ctrl_rx_count;
10707 + int bulk_tx_count;
10708 + int bulk_rx_count;
10709 + int bulk_aborted_count;
10710 + uint64_t ctrl_tx_bytes;
10711 + uint64_t ctrl_rx_bytes;
10712 + uint64_t bulk_tx_bytes;
10713 + uint64_t bulk_rx_bytes;
10715 +} VCHIQ_SERVICE_T;
10717 +/* The quota information is outside VCHIQ_SERVICE_T so that it can be
10718 + statically allocated, since for accounting reasons a service's slot
10719 + usage is carried over between users of the same port number.
10721 +typedef struct vchiq_service_quota_struct {
10722 + unsigned short slot_quota;
10723 + unsigned short slot_use_count;
10724 + unsigned short message_quota;
10725 + unsigned short message_use_count;
10726 + struct semaphore quota_event;
10727 + int previous_tx_index;
10728 +} VCHIQ_SERVICE_QUOTA_T;
10730 +typedef struct vchiq_shared_state_struct {
10732 + /* A non-zero value here indicates that the content is valid. */
10735 + /* The first and last (inclusive) slots allocated to the owner. */
10739 + /* The slot allocated to synchronous messages from the owner. */
10742 + /* Signalling this event indicates that owner's slot handler thread
10743 + ** should run. */
10744 + REMOTE_EVENT_T trigger;
10746 + /* Indicates the byte position within the stream where the next message
10747 + ** will be written. The least significant bits are an index into the
10748 + ** slot. The next bits are the index of the slot in slot_queue. */
10751 + /* This event should be signalled when a slot is recycled. */
10752 + REMOTE_EVENT_T recycle;
10754 + /* The slot_queue index where the next recycled slot will be written. */
10755 + int slot_queue_recycle;
10757 + /* This event should be signalled when a synchronous message is sent. */
10758 + REMOTE_EVENT_T sync_trigger;
10760 + /* This event should be signalled when a synchronous message has been
10762 + REMOTE_EVENT_T sync_release;
10764 + /* A circular buffer of slot indexes. */
10765 + int slot_queue[VCHIQ_MAX_SLOTS_PER_SIDE];
10767 + /* Debugging state */
10768 + int debug[DEBUG_MAX];
10769 +} VCHIQ_SHARED_STATE_T;
10771 +typedef struct vchiq_slot_zero_struct {
10774 + short version_min;
10775 + int slot_zero_size;
10778 + int max_slots_per_side;
10779 + int platform_data[2];
10780 + VCHIQ_SHARED_STATE_T master;
10781 + VCHIQ_SHARED_STATE_T slave;
10782 + VCHIQ_SLOT_INFO_T slots[VCHIQ_MAX_SLOTS];
10783 +} VCHIQ_SLOT_ZERO_T;
10785 +struct vchiq_state_struct {
10788 + VCHIQ_CONNSTATE_T conn_state;
10791 + VCHIQ_SHARED_STATE_T *local;
10792 + VCHIQ_SHARED_STATE_T *remote;
10793 + VCHIQ_SLOT_T *slot_data;
10795 + unsigned short default_slot_quota;
10796 + unsigned short default_message_quota;
10798 + /* Event indicating connect message received */
10799 + struct semaphore connect;
10801 + /* Mutex protecting services */
10802 + struct mutex mutex;
10803 + VCHIQ_INSTANCE_T *instance;
10805 + /* Processes incoming messages */
10806 + struct task_struct *slot_handler_thread;
10808 + /* Processes recycled slots */
10809 + struct task_struct *recycle_thread;
10811 + /* Processes synchronous messages */
10812 + struct task_struct *sync_thread;
10814 + /* Local implementation of the trigger remote event */
10815 + struct semaphore trigger_event;
10817 + /* Local implementation of the recycle remote event */
10818 + struct semaphore recycle_event;
10820 + /* Local implementation of the sync trigger remote event */
10821 + struct semaphore sync_trigger_event;
10823 + /* Local implementation of the sync release remote event */
10824 + struct semaphore sync_release_event;
10828 + VCHIQ_SLOT_INFO_T *rx_info;
10830 + struct mutex slot_mutex;
10832 + struct mutex recycle_mutex;
10834 + struct mutex sync_mutex;
10836 + struct mutex bulk_transfer_mutex;
10838 + /* Indicates the byte position within the stream from where the next
10839 + ** message will be read. The least significant bits are an index into
10840 + ** the slot. The next bits are the index of the slot in
10841 + ** remote->slot_queue. */
10844 + /* A cached copy of local->tx_pos. Only write to local->tx_pos, and read
10845 + from remote->tx_pos. */
10846 + int local_tx_pos;
10848 + /* The slot_queue index of the slot to become available next. */
10849 + int slot_queue_available;
10851 + /* A flag to indicate if any poll has been requested */
10854 + /* The index of the previous slot used for data messages. */
10855 + int previous_data_index;
10857 + /* The number of slots occupied by data messages. */
10858 + unsigned short data_use_count;
10860 + /* The maximum number of slots to be occupied by data messages. */
10861 + unsigned short data_quota;
10863 + /* An array of bit sets indicating which services must be polled. */
10864 + atomic_t poll_services[BITSET_SIZE(VCHIQ_MAX_SERVICES)];
10866 + /* The number of the first unused service */
10867 + int unused_service;
10869 + /* Signalled when a free slot becomes available. */
10870 + struct semaphore slot_available_event;
10872 + struct semaphore slot_remove_event;
10874 + /* Signalled when a free data slot becomes available. */
10875 + struct semaphore data_quota_event;
10877 + /* Incremented when there are bulk transfers which cannot be processed
10878 + * whilst paused and must be processed on resume */
10879 + int deferred_bulks;
10881 + struct state_stats_struct {
10884 + int ctrl_tx_count;
10885 + int ctrl_rx_count;
10889 + VCHIQ_SERVICE_T * services[VCHIQ_MAX_SERVICES];
10890 + VCHIQ_SERVICE_QUOTA_T service_quotas[VCHIQ_MAX_SERVICES];
10891 + VCHIQ_SLOT_INFO_T slot_info[VCHIQ_MAX_SLOTS];
10893 + VCHIQ_PLATFORM_STATE_T platform_state;
10896 +struct bulk_waiter {
10897 + VCHIQ_BULK_T *bulk;
10898 + struct semaphore event;
10902 +extern spinlock_t bulk_waiter_spinlock;
10904 +extern int vchiq_core_log_level;
10905 +extern int vchiq_core_msg_log_level;
10906 +extern int vchiq_sync_log_level;
10908 +extern VCHIQ_STATE_T *vchiq_states[VCHIQ_MAX_STATES];
10910 +extern const char *
10911 +get_conn_state_name(VCHIQ_CONNSTATE_T conn_state);
10913 +extern VCHIQ_SLOT_ZERO_T *
10914 +vchiq_init_slots(void *mem_base, int mem_size);
10916 +extern VCHIQ_STATUS_T
10917 +vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
10920 +extern VCHIQ_STATUS_T
10921 +vchiq_connect_internal(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance);
10923 +extern VCHIQ_SERVICE_T *
10924 +vchiq_add_service_internal(VCHIQ_STATE_T *state,
10925 + const VCHIQ_SERVICE_PARAMS_T *params, int srvstate,
10926 + VCHIQ_INSTANCE_T instance);
10928 +extern VCHIQ_STATUS_T
10929 +vchiq_open_service_internal(VCHIQ_SERVICE_T *service, int client_id);
10931 +extern VCHIQ_STATUS_T
10932 +vchiq_close_service_internal(VCHIQ_SERVICE_T *service, int close_recvd);
10935 +vchiq_terminate_service_internal(VCHIQ_SERVICE_T *service);
10938 +vchiq_free_service_internal(VCHIQ_SERVICE_T *service);
10940 +extern VCHIQ_STATUS_T
10941 +vchiq_shutdown_internal(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance);
10943 +extern VCHIQ_STATUS_T
10944 +vchiq_pause_internal(VCHIQ_STATE_T *state);
10946 +extern VCHIQ_STATUS_T
10947 +vchiq_resume_internal(VCHIQ_STATE_T *state);
10950 +remote_event_pollall(VCHIQ_STATE_T *state);
10952 +extern VCHIQ_STATUS_T
10953 +vchiq_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle,
10954 + VCHI_MEM_HANDLE_T memhandle, void *offset, int size, void *userdata,
10955 + VCHIQ_BULK_MODE_T mode, VCHIQ_BULK_DIR_T dir);
10958 +vchiq_dump_state(void *dump_context, VCHIQ_STATE_T *state);
10961 +vchiq_dump_service_state(void *dump_context, VCHIQ_SERVICE_T *service);
10964 +vchiq_loud_error_header(void);
10967 +vchiq_loud_error_footer(void);
10970 +request_poll(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service, int poll_type);
10972 +static inline VCHIQ_SERVICE_T *
10973 +handle_to_service(VCHIQ_SERVICE_HANDLE_T handle)
10975 + VCHIQ_STATE_T *state = vchiq_states[(handle / VCHIQ_MAX_SERVICES) &
10976 + (VCHIQ_MAX_STATES - 1)];
10980 + return state->services[handle & (VCHIQ_MAX_SERVICES - 1)];
10983 +extern VCHIQ_SERVICE_T *
10984 +find_service_by_handle(VCHIQ_SERVICE_HANDLE_T handle);
10986 +extern VCHIQ_SERVICE_T *
10987 +find_service_by_port(VCHIQ_STATE_T *state, int localport);
10989 +extern VCHIQ_SERVICE_T *
10990 +find_service_for_instance(VCHIQ_INSTANCE_T instance,
10991 + VCHIQ_SERVICE_HANDLE_T handle);
10993 +extern VCHIQ_SERVICE_T *
10994 +next_service_by_instance(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance,
10998 +lock_service(VCHIQ_SERVICE_T *service);
11001 +unlock_service(VCHIQ_SERVICE_T *service);
11003 +/* The following functions are called from vchiq_core, and external
11004 +** implementations must be provided. */
11006 +extern VCHIQ_STATUS_T
11007 +vchiq_prepare_bulk_data(VCHIQ_BULK_T *bulk,
11008 + VCHI_MEM_HANDLE_T memhandle, void *offset, int size, int dir);
11011 +vchiq_transfer_bulk(VCHIQ_BULK_T *bulk);
11014 +vchiq_complete_bulk(VCHIQ_BULK_T *bulk);
11016 +extern VCHIQ_STATUS_T
11017 +vchiq_copy_from_user(void *dst, const void *src, int size);
11020 +remote_event_signal(REMOTE_EVENT_T *event);
11023 +vchiq_platform_check_suspend(VCHIQ_STATE_T *state);
11026 +vchiq_platform_paused(VCHIQ_STATE_T *state);
11028 +extern VCHIQ_STATUS_T
11029 +vchiq_platform_resume(VCHIQ_STATE_T *state);
11032 +vchiq_platform_resumed(VCHIQ_STATE_T *state);
11035 +vchiq_dump(void *dump_context, const char *str, int len);
11038 +vchiq_dump_platform_state(void *dump_context);
11041 +vchiq_dump_platform_instances(void *dump_context);
11044 +vchiq_dump_platform_service_state(void *dump_context,
11045 + VCHIQ_SERVICE_T *service);
11047 +extern VCHIQ_STATUS_T
11048 +vchiq_use_service_internal(VCHIQ_SERVICE_T *service);
11050 +extern VCHIQ_STATUS_T
11051 +vchiq_release_service_internal(VCHIQ_SERVICE_T *service);
11054 +vchiq_on_remote_use(VCHIQ_STATE_T *state);
11057 +vchiq_on_remote_release(VCHIQ_STATE_T *state);
11059 +extern VCHIQ_STATUS_T
11060 +vchiq_platform_init_state(VCHIQ_STATE_T *state);
11062 +extern VCHIQ_STATUS_T
11063 +vchiq_check_service(VCHIQ_SERVICE_T *service);
11066 +vchiq_on_remote_use_active(VCHIQ_STATE_T *state);
11068 +extern VCHIQ_STATUS_T
11069 +vchiq_send_remote_use(VCHIQ_STATE_T *state);
11071 +extern VCHIQ_STATUS_T
11072 +vchiq_send_remote_release(VCHIQ_STATE_T *state);
11074 +extern VCHIQ_STATUS_T
11075 +vchiq_send_remote_use_active(VCHIQ_STATE_T *state);
11078 +vchiq_platform_conn_state_changed(VCHIQ_STATE_T *state,
11079 + VCHIQ_CONNSTATE_T oldstate, VCHIQ_CONNSTATE_T newstate);
11082 +vchiq_platform_handle_timeout(VCHIQ_STATE_T *state);
11085 +vchiq_set_conn_state(VCHIQ_STATE_T *state, VCHIQ_CONNSTATE_T newstate);
11089 +vchiq_log_dump_mem(const char *label, uint32_t addr, const void *voidMem,
11090 + size_t numBytes);
11093 diff -urN linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_genversion linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_genversion
11094 --- linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_genversion 1970-01-01 01:00:00.000000000 +0100
11095 +++ linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_genversion 2013-07-06 15:25:50.000000000 +0100
11097 +#!/usr/bin/perl -w
11102 +# Generate a version from available information
11105 +my $prefix = shift @ARGV;
11106 +my $root = shift @ARGV;
11109 +if ( not defined $root ) {
11110 + die "usage: $0 prefix root-dir\n";
11113 +if ( ! -d $root ) {
11114 + die "root directory $root not found\n";
11117 +my $version = "unknown";
11120 +if ( -d "$root/.git" ) {
11121 + # attempt to work out git version. only do so
11122 + # on a linux build host, as cygwin builds are
11123 + # already slow enough
11125 + if ( -f "/usr/bin/git" || -f "/usr/local/bin/git" ) {
11126 + if (not open(F, "git --git-dir $root/.git rev-parse --verify HEAD|")) {
11127 + $version = "no git version";
11131 + $version =~ s/[ \r\n]*$//; # chomp may not be enough (cygwin).
11132 + $version =~ s/^[ \r\n]*//; # chomp may not be enough (cygwin).
11135 + if (open(G, "git --git-dir $root/.git status --porcelain|")) {
11137 + $tainted =~ s/[ \r\n]*$//; # chomp may not be enough (cygwin).
11138 + $tainted =~ s/^[ \r\n]*//; # chomp may not be enough (cygwin).
11139 + if (length $tainted) {
11140 + $version = join ' ', $version, "(tainted)";
11143 + $version = join ' ', $version, "(clean)";
11149 +my $hostname = `hostname`;
11150 +$hostname =~ s/[ \r\n]*$//; # chomp may not be enough (cygwin).
11151 +$hostname =~ s/^[ \r\n]*//; # chomp may not be enough (cygwin).
11154 +print STDERR "Version $version\n";
11156 +#include "${prefix}_build_info.h"
11157 +#include <linux/broadcom/vc_debug_sym.h>
11159 +VC_DEBUG_DECLARE_STRING_VAR( ${prefix}_build_hostname, "$hostname" );
11160 +VC_DEBUG_DECLARE_STRING_VAR( ${prefix}_build_version, "$version" );
11161 +VC_DEBUG_DECLARE_STRING_VAR( ${prefix}_build_time, __TIME__ );
11162 +VC_DEBUG_DECLARE_STRING_VAR( ${prefix}_build_date, __DATE__ );
11164 +const char *vchiq_get_build_hostname( void )
11166 + return vchiq_build_hostname;
11169 +const char *vchiq_get_build_version( void )
11171 + return vchiq_build_version;
11174 +const char *vchiq_get_build_date( void )
11176 + return vchiq_build_date;
11179 +const char *vchiq_get_build_time( void )
11181 + return vchiq_build_time;
11186 diff -urN linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq.h linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq.h
11187 --- linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq.h 1970-01-01 01:00:00.000000000 +0100
11188 +++ linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq.h 2013-07-06 15:25:50.000000000 +0100
11191 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
11193 + * Redistribution and use in source and binary forms, with or without
11194 + * modification, are permitted provided that the following conditions
11196 + * 1. Redistributions of source code must retain the above copyright
11197 + * notice, this list of conditions, and the following disclaimer,
11198 + * without modification.
11199 + * 2. Redistributions in binary form must reproduce the above copyright
11200 + * notice, this list of conditions and the following disclaimer in the
11201 + * documentation and/or other materials provided with the distribution.
11202 + * 3. The names of the above-listed copyright holders may not be used
11203 + * to endorse or promote products derived from this software without
11204 + * specific prior written permission.
11206 + * ALTERNATIVELY, this software may be distributed under the terms of the
11207 + * GNU General Public License ("GPL") version 2, as published by the Free
11208 + * Software Foundation.
11210 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
11211 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
11212 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
11213 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
11214 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
11215 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
11216 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
11217 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
11218 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
11219 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
11220 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
11223 +#ifndef VCHIQ_VCHIQ_H
11224 +#define VCHIQ_VCHIQ_H
11226 +#include "vchiq_if.h"
11227 +#include "vchiq_util.h"
11231 diff -urN linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_if.h linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_if.h
11232 --- linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_if.h 1970-01-01 01:00:00.000000000 +0100
11233 +++ linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_if.h 2013-07-06 15:25:50.000000000 +0100
11236 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
11238 + * Redistribution and use in source and binary forms, with or without
11239 + * modification, are permitted provided that the following conditions
11241 + * 1. Redistributions of source code must retain the above copyright
11242 + * notice, this list of conditions, and the following disclaimer,
11243 + * without modification.
11244 + * 2. Redistributions in binary form must reproduce the above copyright
11245 + * notice, this list of conditions and the following disclaimer in the
11246 + * documentation and/or other materials provided with the distribution.
11247 + * 3. The names of the above-listed copyright holders may not be used
11248 + * to endorse or promote products derived from this software without
11249 + * specific prior written permission.
11251 + * ALTERNATIVELY, this software may be distributed under the terms of the
11252 + * GNU General Public License ("GPL") version 2, as published by the Free
11253 + * Software Foundation.
11255 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
11256 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
11257 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
11258 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
11259 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
11260 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
11261 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
11262 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
11263 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
11264 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
11265 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
11268 +#ifndef VCHIQ_IF_H
11269 +#define VCHIQ_IF_H
11271 +#include "interface/vchi/vchi_mh.h"
11273 +#define VCHIQ_SERVICE_HANDLE_INVALID 0
11275 +#define VCHIQ_SLOT_SIZE 4096
11276 +#define VCHIQ_MAX_MSG_SIZE (VCHIQ_SLOT_SIZE - sizeof(VCHIQ_HEADER_T))
11277 +#define VCHIQ_CHANNEL_SIZE VCHIQ_MAX_MSG_SIZE /* For backwards compatibility */
11279 +#define VCHIQ_MAKE_FOURCC(x0, x1, x2, x3) \
11280 + (((x0) << 24) | ((x1) << 16) | ((x2) << 8) | (x3))
11281 +#define VCHIQ_GET_SERVICE_USERDATA(service) vchiq_get_service_userdata(service)
11282 +#define VCHIQ_GET_SERVICE_FOURCC(service) vchiq_get_service_fourcc(service)
11285 + VCHIQ_SERVICE_OPENED, /* service, -, - */
11286 + VCHIQ_SERVICE_CLOSED, /* service, -, - */
11287 + VCHIQ_MESSAGE_AVAILABLE, /* service, header, - */
11288 + VCHIQ_BULK_TRANSMIT_DONE, /* service, -, bulk_userdata */
11289 + VCHIQ_BULK_RECEIVE_DONE, /* service, -, bulk_userdata */
11290 + VCHIQ_BULK_TRANSMIT_ABORTED, /* service, -, bulk_userdata */
11291 + VCHIQ_BULK_RECEIVE_ABORTED /* service, -, bulk_userdata */
11295 + VCHIQ_ERROR = -1,
11296 + VCHIQ_SUCCESS = 0,
11301 + VCHIQ_BULK_MODE_CALLBACK,
11302 + VCHIQ_BULK_MODE_BLOCKING,
11303 + VCHIQ_BULK_MODE_NOCALLBACK,
11304 + VCHIQ_BULK_MODE_WAITING /* Reserved for internal use */
11305 +} VCHIQ_BULK_MODE_T;
11308 + VCHIQ_SERVICE_OPTION_AUTOCLOSE,
11309 + VCHIQ_SERVICE_OPTION_SLOT_QUOTA,
11310 + VCHIQ_SERVICE_OPTION_MESSAGE_QUOTA,
11311 + VCHIQ_SERVICE_OPTION_SYNCHRONOUS
11312 +} VCHIQ_SERVICE_OPTION_T;
11314 +typedef struct vchiq_header_struct {
11315 + /* The message identifier - opaque to applications. */
11318 + /* Size of message data. */
11319 + unsigned int size;
11321 + char data[0]; /* message */
11325 + const void *data;
11326 + unsigned int size;
11327 +} VCHIQ_ELEMENT_T;
11329 +typedef unsigned int VCHIQ_SERVICE_HANDLE_T;
11331 +typedef VCHIQ_STATUS_T (*VCHIQ_CALLBACK_T)(VCHIQ_REASON_T, VCHIQ_HEADER_T *,
11332 + VCHIQ_SERVICE_HANDLE_T, void *);
11334 +typedef struct vchiq_service_base_struct {
11336 + VCHIQ_CALLBACK_T callback;
11338 +} VCHIQ_SERVICE_BASE_T;
11340 +typedef struct vchiq_service_params_struct {
11342 + VCHIQ_CALLBACK_T callback;
11344 + short version; /* Increment for non-trivial changes */
11345 + short version_min; /* Update for incompatible changes */
11346 +} VCHIQ_SERVICE_PARAMS_T;
11348 +typedef struct vchiq_config_struct {
11349 + unsigned int max_msg_size;
11350 + unsigned int bulk_threshold; /* The message size above which it
11351 + is better to use a bulk transfer
11352 + (<= max_msg_size) */
11353 + unsigned int max_outstanding_bulks;
11354 + unsigned int max_services;
11355 + short version; /* The version of VCHIQ */
11356 + short version_min; /* The minimum compatible version of VCHIQ */
11359 +typedef struct vchiq_instance_struct *VCHIQ_INSTANCE_T;
11360 +typedef void (*VCHIQ_REMOTE_USE_CALLBACK_T)(void *cb_arg);
11362 +extern VCHIQ_STATUS_T vchiq_initialise(VCHIQ_INSTANCE_T *pinstance);
11363 +extern VCHIQ_STATUS_T vchiq_shutdown(VCHIQ_INSTANCE_T instance);
11364 +extern VCHIQ_STATUS_T vchiq_connect(VCHIQ_INSTANCE_T instance);
11365 +extern VCHIQ_STATUS_T vchiq_add_service(VCHIQ_INSTANCE_T instance,
11366 + const VCHIQ_SERVICE_PARAMS_T *params,
11367 + VCHIQ_SERVICE_HANDLE_T *pservice);
11368 +extern VCHIQ_STATUS_T vchiq_open_service(VCHIQ_INSTANCE_T instance,
11369 + const VCHIQ_SERVICE_PARAMS_T *params,
11370 + VCHIQ_SERVICE_HANDLE_T *pservice);
11371 +extern VCHIQ_STATUS_T vchiq_close_service(VCHIQ_SERVICE_HANDLE_T service);
11372 +extern VCHIQ_STATUS_T vchiq_remove_service(VCHIQ_SERVICE_HANDLE_T service);
11373 +extern VCHIQ_STATUS_T vchiq_use_service(VCHIQ_SERVICE_HANDLE_T service);
11374 +extern VCHIQ_STATUS_T vchiq_use_service_no_resume(
11375 + VCHIQ_SERVICE_HANDLE_T service);
11376 +extern VCHIQ_STATUS_T vchiq_release_service(VCHIQ_SERVICE_HANDLE_T service);
11378 +extern VCHIQ_STATUS_T vchiq_queue_message(VCHIQ_SERVICE_HANDLE_T service,
11379 + const VCHIQ_ELEMENT_T *elements, unsigned int count);
11380 +extern void vchiq_release_message(VCHIQ_SERVICE_HANDLE_T service,
11381 + VCHIQ_HEADER_T *header);
11382 +extern VCHIQ_STATUS_T vchiq_queue_bulk_transmit(VCHIQ_SERVICE_HANDLE_T service,
11383 + const void *data, unsigned int size, void *userdata);
11384 +extern VCHIQ_STATUS_T vchiq_queue_bulk_receive(VCHIQ_SERVICE_HANDLE_T service,
11385 + void *data, unsigned int size, void *userdata);
11386 +extern VCHIQ_STATUS_T vchiq_queue_bulk_transmit_handle(
11387 + VCHIQ_SERVICE_HANDLE_T service, VCHI_MEM_HANDLE_T handle,
11388 + const void *offset, unsigned int size, void *userdata);
11389 +extern VCHIQ_STATUS_T vchiq_queue_bulk_receive_handle(
11390 + VCHIQ_SERVICE_HANDLE_T service, VCHI_MEM_HANDLE_T handle,
11391 + void *offset, unsigned int size, void *userdata);
11392 +extern VCHIQ_STATUS_T vchiq_bulk_transmit(VCHIQ_SERVICE_HANDLE_T service,
11393 + const void *data, unsigned int size, void *userdata,
11394 + VCHIQ_BULK_MODE_T mode);
11395 +extern VCHIQ_STATUS_T vchiq_bulk_receive(VCHIQ_SERVICE_HANDLE_T service,
11396 + void *data, unsigned int size, void *userdata,
11397 + VCHIQ_BULK_MODE_T mode);
11398 +extern VCHIQ_STATUS_T vchiq_bulk_transmit_handle(VCHIQ_SERVICE_HANDLE_T service,
11399 + VCHI_MEM_HANDLE_T handle, const void *offset, unsigned int size,
11400 + void *userdata, VCHIQ_BULK_MODE_T mode);
11401 +extern VCHIQ_STATUS_T vchiq_bulk_receive_handle(VCHIQ_SERVICE_HANDLE_T service,
11402 + VCHI_MEM_HANDLE_T handle, void *offset, unsigned int size,
11403 + void *userdata, VCHIQ_BULK_MODE_T mode);
11404 +extern int vchiq_get_client_id(VCHIQ_SERVICE_HANDLE_T service);
11405 +extern void *vchiq_get_service_userdata(VCHIQ_SERVICE_HANDLE_T service);
11406 +extern int vchiq_get_service_fourcc(VCHIQ_SERVICE_HANDLE_T service);
11407 +extern VCHIQ_STATUS_T vchiq_get_config(VCHIQ_INSTANCE_T instance,
11408 + int config_size, VCHIQ_CONFIG_T *pconfig);
11409 +extern VCHIQ_STATUS_T vchiq_set_service_option(VCHIQ_SERVICE_HANDLE_T service,
11410 + VCHIQ_SERVICE_OPTION_T option, int value);
11412 +extern VCHIQ_STATUS_T vchiq_remote_use(VCHIQ_INSTANCE_T instance,
11413 + VCHIQ_REMOTE_USE_CALLBACK_T callback, void *cb_arg);
11414 +extern VCHIQ_STATUS_T vchiq_remote_release(VCHIQ_INSTANCE_T instance);
11416 +extern VCHIQ_STATUS_T vchiq_dump_phys_mem(VCHIQ_SERVICE_HANDLE_T service,
11417 + void *ptr, size_t num_bytes);
11419 +extern VCHIQ_STATUS_T vchiq_get_peer_version(VCHIQ_SERVICE_HANDLE_T handle,
11420 + short *peer_version);
11422 +#endif /* VCHIQ_IF_H */
11423 diff -urN linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_ioctl.h linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_ioctl.h
11424 --- linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_ioctl.h 1970-01-01 01:00:00.000000000 +0100
11425 +++ linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_ioctl.h 2013-07-06 15:25:50.000000000 +0100
11428 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
11430 + * Redistribution and use in source and binary forms, with or without
11431 + * modification, are permitted provided that the following conditions
11433 + * 1. Redistributions of source code must retain the above copyright
11434 + * notice, this list of conditions, and the following disclaimer,
11435 + * without modification.
11436 + * 2. Redistributions in binary form must reproduce the above copyright
11437 + * notice, this list of conditions and the following disclaimer in the
11438 + * documentation and/or other materials provided with the distribution.
11439 + * 3. The names of the above-listed copyright holders may not be used
11440 + * to endorse or promote products derived from this software without
11441 + * specific prior written permission.
11443 + * ALTERNATIVELY, this software may be distributed under the terms of the
11444 + * GNU General Public License ("GPL") version 2, as published by the Free
11445 + * Software Foundation.
11447 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
11448 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
11449 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
11450 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
11451 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
11452 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
11453 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
11454 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
11455 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
11456 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
11457 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
11460 +#ifndef VCHIQ_IOCTLS_H
11461 +#define VCHIQ_IOCTLS_H
11463 +#include <linux/ioctl.h>
11464 +#include "vchiq_if.h"
11466 +#define VCHIQ_IOC_MAGIC 0xc4
11467 +#define VCHIQ_INVALID_HANDLE (~0)
11470 + VCHIQ_SERVICE_PARAMS_T params;
11473 + unsigned int handle; /* OUT */
11474 +} VCHIQ_CREATE_SERVICE_T;
11477 + unsigned int handle;
11478 + unsigned int count;
11479 + const VCHIQ_ELEMENT_T *elements;
11480 +} VCHIQ_QUEUE_MESSAGE_T;
11483 + unsigned int handle;
11485 + unsigned int size;
11487 + VCHIQ_BULK_MODE_T mode;
11488 +} VCHIQ_QUEUE_BULK_TRANSFER_T;
11491 + VCHIQ_REASON_T reason;
11492 + VCHIQ_HEADER_T *header;
11493 + void *service_userdata;
11494 + void *bulk_userdata;
11495 +} VCHIQ_COMPLETION_DATA_T;
11498 + unsigned int count;
11499 + VCHIQ_COMPLETION_DATA_T *buf;
11500 + unsigned int msgbufsize;
11501 + unsigned int msgbufcount; /* IN/OUT */
11503 +} VCHIQ_AWAIT_COMPLETION_T;
11506 + unsigned int handle;
11508 + unsigned int bufsize;
11510 +} VCHIQ_DEQUEUE_MESSAGE_T;
11513 + unsigned int config_size;
11514 + VCHIQ_CONFIG_T *pconfig;
11515 +} VCHIQ_GET_CONFIG_T;
11518 + unsigned int handle;
11519 + VCHIQ_SERVICE_OPTION_T option;
11521 +} VCHIQ_SET_SERVICE_OPTION_T;
11525 + size_t num_bytes;
11526 +} VCHIQ_DUMP_MEM_T;
11528 +#define VCHIQ_IOC_CONNECT _IO(VCHIQ_IOC_MAGIC, 0)
11529 +#define VCHIQ_IOC_SHUTDOWN _IO(VCHIQ_IOC_MAGIC, 1)
11530 +#define VCHIQ_IOC_CREATE_SERVICE \
11531 + _IOWR(VCHIQ_IOC_MAGIC, 2, VCHIQ_CREATE_SERVICE_T)
11532 +#define VCHIQ_IOC_REMOVE_SERVICE _IO(VCHIQ_IOC_MAGIC, 3)
11533 +#define VCHIQ_IOC_QUEUE_MESSAGE \
11534 + _IOW(VCHIQ_IOC_MAGIC, 4, VCHIQ_QUEUE_MESSAGE_T)
11535 +#define VCHIQ_IOC_QUEUE_BULK_TRANSMIT \
11536 + _IOWR(VCHIQ_IOC_MAGIC, 5, VCHIQ_QUEUE_BULK_TRANSFER_T)
11537 +#define VCHIQ_IOC_QUEUE_BULK_RECEIVE \
11538 + _IOWR(VCHIQ_IOC_MAGIC, 6, VCHIQ_QUEUE_BULK_TRANSFER_T)
11539 +#define VCHIQ_IOC_AWAIT_COMPLETION \
11540 + _IOWR(VCHIQ_IOC_MAGIC, 7, VCHIQ_AWAIT_COMPLETION_T)
11541 +#define VCHIQ_IOC_DEQUEUE_MESSAGE \
11542 + _IOWR(VCHIQ_IOC_MAGIC, 8, VCHIQ_DEQUEUE_MESSAGE_T)
11543 +#define VCHIQ_IOC_GET_CLIENT_ID _IO(VCHIQ_IOC_MAGIC, 9)
11544 +#define VCHIQ_IOC_GET_CONFIG \
11545 + _IOWR(VCHIQ_IOC_MAGIC, 10, VCHIQ_GET_CONFIG_T)
11546 +#define VCHIQ_IOC_CLOSE_SERVICE _IO(VCHIQ_IOC_MAGIC, 11)
11547 +#define VCHIQ_IOC_USE_SERVICE _IO(VCHIQ_IOC_MAGIC, 12)
11548 +#define VCHIQ_IOC_RELEASE_SERVICE _IO(VCHIQ_IOC_MAGIC, 13)
11549 +#define VCHIQ_IOC_SET_SERVICE_OPTION \
11550 + _IOW(VCHIQ_IOC_MAGIC, 14, VCHIQ_SET_SERVICE_OPTION_T)
11551 +#define VCHIQ_IOC_DUMP_PHYS_MEM \
11552 + _IOW(VCHIQ_IOC_MAGIC, 15, VCHIQ_DUMP_MEM_T)
11553 +#define VCHIQ_IOC_MAX 15
11556 diff -urN linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c
11557 --- linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c 1970-01-01 01:00:00.000000000 +0100
11558 +++ linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c 2013-07-06 15:25:50.000000000 +0100
11561 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
11563 + * Redistribution and use in source and binary forms, with or without
11564 + * modification, are permitted provided that the following conditions
11566 + * 1. Redistributions of source code must retain the above copyright
11567 + * notice, this list of conditions, and the following disclaimer,
11568 + * without modification.
11569 + * 2. Redistributions in binary form must reproduce the above copyright
11570 + * notice, this list of conditions and the following disclaimer in the
11571 + * documentation and/or other materials provided with the distribution.
11572 + * 3. The names of the above-listed copyright holders may not be used
11573 + * to endorse or promote products derived from this software without
11574 + * specific prior written permission.
11576 + * ALTERNATIVELY, this software may be distributed under the terms of the
11577 + * GNU General Public License ("GPL") version 2, as published by the Free
11578 + * Software Foundation.
11580 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
11581 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
11582 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
11583 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
11584 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
11585 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
11586 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
11587 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
11588 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
11589 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
11590 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
11593 +/* ---- Include Files ---------------------------------------------------- */
11595 +#include <linux/kernel.h>
11596 +#include <linux/module.h>
11597 +#include <linux/mutex.h>
11599 +#include "vchiq_core.h"
11600 +#include "vchiq_arm.h"
11602 +/* ---- Public Variables ------------------------------------------------- */
11604 +/* ---- Private Constants and Types -------------------------------------- */
11606 +struct bulk_waiter_node {
11607 + struct bulk_waiter bulk_waiter;
11609 + struct list_head list;
11612 +struct vchiq_instance_struct {
11613 + VCHIQ_STATE_T *state;
11617 + struct list_head bulk_waiter_list;
11618 + struct mutex bulk_waiter_list_mutex;
11621 +static VCHIQ_STATUS_T
11622 +vchiq_blocking_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, void *data,
11623 + unsigned int size, VCHIQ_BULK_DIR_T dir);
11625 +/****************************************************************************
11627 +* vchiq_initialise
11629 +***************************************************************************/
11630 +#define VCHIQ_INIT_RETRIES 10
11631 +VCHIQ_STATUS_T vchiq_initialise(VCHIQ_INSTANCE_T *instanceOut)
11633 + VCHIQ_STATUS_T status = VCHIQ_ERROR;
11634 + VCHIQ_STATE_T *state;
11635 + VCHIQ_INSTANCE_T instance = NULL;
11638 + vchiq_log_trace(vchiq_core_log_level, "%s called", __func__);
11640 + /* VideoCore may not be ready due to boot up timing.
11641 + It may never be ready if kernel and firmware are mismatched, so don't block forever. */
11642 + for (i=0; i<VCHIQ_INIT_RETRIES; i++) {
11643 + state = vchiq_get_state();
11648 + if (i==VCHIQ_INIT_RETRIES) {
11649 + vchiq_log_error(vchiq_core_log_level,
11650 + "%s: videocore not initialized\n", __func__);
11652 + } else if (i>0) {
11653 + vchiq_log_warning(vchiq_core_log_level,
11654 + "%s: videocore initialized after %d retries\n", __func__, i);
11657 + instance = kzalloc(sizeof(*instance), GFP_KERNEL);
11659 + vchiq_log_error(vchiq_core_log_level,
11660 + "%s: error allocating vchiq instance\n", __func__);
11664 + instance->connected = 0;
11665 + instance->state = state;
11666 + mutex_init(&instance->bulk_waiter_list_mutex);
11667 + INIT_LIST_HEAD(&instance->bulk_waiter_list);
11669 + *instanceOut = instance;
11671 + status = VCHIQ_SUCCESS;
11674 + vchiq_log_trace(vchiq_core_log_level,
11675 + "%s(%p): returning %d", __func__, instance, status);
11679 +EXPORT_SYMBOL(vchiq_initialise);
11681 +/****************************************************************************
11685 +***************************************************************************/
11687 +VCHIQ_STATUS_T vchiq_shutdown(VCHIQ_INSTANCE_T instance)
11689 + VCHIQ_STATUS_T status;
11690 + VCHIQ_STATE_T *state = instance->state;
11692 + vchiq_log_trace(vchiq_core_log_level,
11693 + "%s(%p) called", __func__, instance);
11695 + if (mutex_lock_interruptible(&state->mutex) != 0)
11696 + return VCHIQ_RETRY;
11698 + /* Remove all services */
11699 + status = vchiq_shutdown_internal(state, instance);
11701 + mutex_unlock(&state->mutex);
11703 + vchiq_log_trace(vchiq_core_log_level,
11704 + "%s(%p): returning %d", __func__, instance, status);
11706 + if (status == VCHIQ_SUCCESS) {
11707 + struct list_head *pos, *next;
11708 + list_for_each_safe(pos, next,
11709 + &instance->bulk_waiter_list) {
11710 + struct bulk_waiter_node *waiter;
11711 + waiter = list_entry(pos,
11712 + struct bulk_waiter_node,
11715 + vchiq_log_info(vchiq_arm_log_level,
11716 + "bulk_waiter - cleaned up %x "
11718 + (unsigned int)waiter, waiter->pid);
11726 +EXPORT_SYMBOL(vchiq_shutdown);
11728 +/****************************************************************************
11730 +* vchiq_is_connected
11732 +***************************************************************************/
11734 +int vchiq_is_connected(VCHIQ_INSTANCE_T instance)
11736 + return instance->connected;
11739 +/****************************************************************************
11743 +***************************************************************************/
11745 +VCHIQ_STATUS_T vchiq_connect(VCHIQ_INSTANCE_T instance)
11747 + VCHIQ_STATUS_T status;
11748 + VCHIQ_STATE_T *state = instance->state;
11750 + vchiq_log_trace(vchiq_core_log_level,
11751 + "%s(%p) called", __func__, instance);
11753 + if (mutex_lock_interruptible(&state->mutex) != 0) {
11754 + vchiq_log_trace(vchiq_core_log_level,
11755 + "%s: call to mutex_lock failed", __func__);
11756 + status = VCHIQ_RETRY;
11759 + status = vchiq_connect_internal(state, instance);
11761 + if (status == VCHIQ_SUCCESS)
11762 + instance->connected = 1;
11764 + mutex_unlock(&state->mutex);
11767 + vchiq_log_trace(vchiq_core_log_level,
11768 + "%s(%p): returning %d", __func__, instance, status);
11772 +EXPORT_SYMBOL(vchiq_connect);
11774 +/****************************************************************************
11776 +* vchiq_add_service
11778 +***************************************************************************/
11780 +VCHIQ_STATUS_T vchiq_add_service(
11781 + VCHIQ_INSTANCE_T instance,
11782 + const VCHIQ_SERVICE_PARAMS_T *params,
11783 + VCHIQ_SERVICE_HANDLE_T *phandle)
11785 + VCHIQ_STATUS_T status;
11786 + VCHIQ_STATE_T *state = instance->state;
11787 + VCHIQ_SERVICE_T *service = NULL;
11790 + vchiq_log_trace(vchiq_core_log_level,
11791 + "%s(%p) called", __func__, instance);
11793 + *phandle = VCHIQ_SERVICE_HANDLE_INVALID;
11795 + srvstate = vchiq_is_connected(instance)
11796 + ? VCHIQ_SRVSTATE_LISTENING
11797 + : VCHIQ_SRVSTATE_HIDDEN;
11799 + service = vchiq_add_service_internal(
11806 + *phandle = service->handle;
11807 + status = VCHIQ_SUCCESS;
11809 + status = VCHIQ_ERROR;
11811 + vchiq_log_trace(vchiq_core_log_level,
11812 + "%s(%p): returning %d", __func__, instance, status);
11816 +EXPORT_SYMBOL(vchiq_add_service);
11818 +/****************************************************************************
11820 +* vchiq_open_service
11822 +***************************************************************************/
11824 +VCHIQ_STATUS_T vchiq_open_service(
11825 + VCHIQ_INSTANCE_T instance,
11826 + const VCHIQ_SERVICE_PARAMS_T *params,
11827 + VCHIQ_SERVICE_HANDLE_T *phandle)
11829 + VCHIQ_STATUS_T status = VCHIQ_ERROR;
11830 + VCHIQ_STATE_T *state = instance->state;
11831 + VCHIQ_SERVICE_T *service = NULL;
11833 + vchiq_log_trace(vchiq_core_log_level,
11834 + "%s(%p) called", __func__, instance);
11836 + *phandle = VCHIQ_SERVICE_HANDLE_INVALID;
11838 + if (!vchiq_is_connected(instance))
11841 + service = vchiq_add_service_internal(state,
11843 + VCHIQ_SRVSTATE_OPENING,
11847 + status = vchiq_open_service_internal(service, current->pid);
11848 + if (status == VCHIQ_SUCCESS)
11849 + *phandle = service->handle;
11851 + vchiq_remove_service(service->handle);
11855 + vchiq_log_trace(vchiq_core_log_level,
11856 + "%s(%p): returning %d", __func__, instance, status);
11860 +EXPORT_SYMBOL(vchiq_open_service);
11863 +vchiq_queue_bulk_transmit(VCHIQ_SERVICE_HANDLE_T handle,
11864 + const void *data, unsigned int size, void *userdata)
11866 + return vchiq_bulk_transfer(handle,
11867 + VCHI_MEM_HANDLE_INVALID, (void *)data, size, userdata,
11868 + VCHIQ_BULK_MODE_CALLBACK, VCHIQ_BULK_TRANSMIT);
11870 +EXPORT_SYMBOL(vchiq_queue_bulk_transmit);
11873 +vchiq_queue_bulk_receive(VCHIQ_SERVICE_HANDLE_T handle, void *data,
11874 + unsigned int size, void *userdata)
11876 + return vchiq_bulk_transfer(handle,
11877 + VCHI_MEM_HANDLE_INVALID, data, size, userdata,
11878 + VCHIQ_BULK_MODE_CALLBACK, VCHIQ_BULK_RECEIVE);
11880 +EXPORT_SYMBOL(vchiq_queue_bulk_receive);
11883 +vchiq_bulk_transmit(VCHIQ_SERVICE_HANDLE_T handle, const void *data,
11884 + unsigned int size, void *userdata, VCHIQ_BULK_MODE_T mode)
11886 + VCHIQ_STATUS_T status;
11889 + case VCHIQ_BULK_MODE_NOCALLBACK:
11890 + case VCHIQ_BULK_MODE_CALLBACK:
11891 + status = vchiq_bulk_transfer(handle,
11892 + VCHI_MEM_HANDLE_INVALID, (void *)data, size, userdata,
11893 + mode, VCHIQ_BULK_TRANSMIT);
11895 + case VCHIQ_BULK_MODE_BLOCKING:
11896 + status = vchiq_blocking_bulk_transfer(handle,
11897 + (void *)data, size, VCHIQ_BULK_TRANSMIT);
11900 + return VCHIQ_ERROR;
11905 +EXPORT_SYMBOL(vchiq_bulk_transmit);
11908 +vchiq_bulk_receive(VCHIQ_SERVICE_HANDLE_T handle, void *data,
11909 + unsigned int size, void *userdata, VCHIQ_BULK_MODE_T mode)
11911 + VCHIQ_STATUS_T status;
11914 + case VCHIQ_BULK_MODE_NOCALLBACK:
11915 + case VCHIQ_BULK_MODE_CALLBACK:
11916 + status = vchiq_bulk_transfer(handle,
11917 + VCHI_MEM_HANDLE_INVALID, data, size, userdata,
11918 + mode, VCHIQ_BULK_RECEIVE);
11920 + case VCHIQ_BULK_MODE_BLOCKING:
11921 + status = vchiq_blocking_bulk_transfer(handle,
11922 + (void *)data, size, VCHIQ_BULK_RECEIVE);
11925 + return VCHIQ_ERROR;
11930 +EXPORT_SYMBOL(vchiq_bulk_receive);
11932 +static VCHIQ_STATUS_T
11933 +vchiq_blocking_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, void *data,
11934 + unsigned int size, VCHIQ_BULK_DIR_T dir)
11936 + VCHIQ_INSTANCE_T instance;
11937 + VCHIQ_SERVICE_T *service;
11938 + VCHIQ_STATUS_T status;
11939 + struct bulk_waiter_node *waiter = NULL;
11940 + struct list_head *pos;
11942 + service = find_service_by_handle(handle);
11944 + return VCHIQ_ERROR;
11946 + instance = service->instance;
11948 + unlock_service(service);
11950 + mutex_lock(&instance->bulk_waiter_list_mutex);
11951 + list_for_each(pos, &instance->bulk_waiter_list) {
11952 + if (list_entry(pos, struct bulk_waiter_node,
11953 + list)->pid == current->pid) {
11954 + waiter = list_entry(pos,
11955 + struct bulk_waiter_node,
11961 + mutex_unlock(&instance->bulk_waiter_list_mutex);
11964 + VCHIQ_BULK_T *bulk = waiter->bulk_waiter.bulk;
11966 + /* This thread has an outstanding bulk transfer. */
11967 + if ((bulk->data != data) ||
11968 + (bulk->size != size)) {
11969 + /* This is not a retry of the previous one.
11970 + ** Cancel the signal when the transfer
11972 + spin_lock(&bulk_waiter_spinlock);
11973 + bulk->userdata = NULL;
11974 + spin_unlock(&bulk_waiter_spinlock);
11980 + waiter = kzalloc(sizeof(struct bulk_waiter_node), GFP_KERNEL);
11982 + vchiq_log_error(vchiq_core_log_level,
11983 + "%s - out of memory", __func__);
11984 + return VCHIQ_ERROR;
11988 + status = vchiq_bulk_transfer(handle, VCHI_MEM_HANDLE_INVALID,
11989 + data, size, &waiter->bulk_waiter, VCHIQ_BULK_MODE_BLOCKING,
11991 + if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
11992 + !waiter->bulk_waiter.bulk) {
11993 + VCHIQ_BULK_T *bulk = waiter->bulk_waiter.bulk;
11995 + /* Cancel the signal when the transfer
11997 + spin_lock(&bulk_waiter_spinlock);
11998 + bulk->userdata = NULL;
11999 + spin_unlock(&bulk_waiter_spinlock);
12003 + waiter->pid = current->pid;
12004 + mutex_lock(&instance->bulk_waiter_list_mutex);
12005 + list_add(&waiter->list, &instance->bulk_waiter_list);
12006 + mutex_unlock(&instance->bulk_waiter_list_mutex);
12007 + vchiq_log_info(vchiq_arm_log_level,
12008 + "saved bulk_waiter %x for pid %d",
12009 + (unsigned int)waiter, current->pid);
12014 diff -urN linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_memdrv.h linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_memdrv.h
12015 --- linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_memdrv.h 1970-01-01 01:00:00.000000000 +0100
12016 +++ linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_memdrv.h 2013-07-06 15:25:50.000000000 +0100
12019 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
12021 + * Redistribution and use in source and binary forms, with or without
12022 + * modification, are permitted provided that the following conditions
12024 + * 1. Redistributions of source code must retain the above copyright
12025 + * notice, this list of conditions, and the following disclaimer,
12026 + * without modification.
12027 + * 2. Redistributions in binary form must reproduce the above copyright
12028 + * notice, this list of conditions and the following disclaimer in the
12029 + * documentation and/or other materials provided with the distribution.
12030 + * 3. The names of the above-listed copyright holders may not be used
12031 + * to endorse or promote products derived from this software without
12032 + * specific prior written permission.
12034 + * ALTERNATIVELY, this software may be distributed under the terms of the
12035 + * GNU General Public License ("GPL") version 2, as published by the Free
12036 + * Software Foundation.
12038 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
12039 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
12040 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
12041 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
12042 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
12043 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
12044 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
12045 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
12046 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
12047 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
12048 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
12051 +#ifndef VCHIQ_MEMDRV_H
12052 +#define VCHIQ_MEMDRV_H
12054 +/* ---- Include Files ----------------------------------------------------- */
12056 +#include <linux/kernel.h>
12057 +#include "vchiq_if.h"
12059 +/* ---- Constants and Types ---------------------------------------------- */
12062 + void *armSharedMemVirt;
12063 + dma_addr_t armSharedMemPhys;
12064 + size_t armSharedMemSize;
12066 + void *vcSharedMemVirt;
12067 + dma_addr_t vcSharedMemPhys;
12068 + size_t vcSharedMemSize;
12069 +} VCHIQ_SHARED_MEM_INFO_T;
12071 +/* ---- Variable Externs ------------------------------------------------- */
12073 +/* ---- Function Prototypes ---------------------------------------------- */
12075 +void vchiq_get_shared_mem_info(VCHIQ_SHARED_MEM_INFO_T *info);
12077 +VCHIQ_STATUS_T vchiq_memdrv_initialise(void);
12079 +VCHIQ_STATUS_T vchiq_userdrv_create_instance(
12080 + const VCHIQ_PLATFORM_DATA_T * platform_data);
12082 +VCHIQ_STATUS_T vchiq_userdrv_suspend(
12083 + const VCHIQ_PLATFORM_DATA_T * platform_data);
12085 +VCHIQ_STATUS_T vchiq_userdrv_resume(
12086 + const VCHIQ_PLATFORM_DATA_T * platform_data);
12089 diff -urN linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_pagelist.h linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_pagelist.h
12090 --- linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_pagelist.h 1970-01-01 01:00:00.000000000 +0100
12091 +++ linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_pagelist.h 2013-07-06 15:25:50.000000000 +0100
12094 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
12096 + * Redistribution and use in source and binary forms, with or without
12097 + * modification, are permitted provided that the following conditions
12099 + * 1. Redistributions of source code must retain the above copyright
12100 + * notice, this list of conditions, and the following disclaimer,
12101 + * without modification.
12102 + * 2. Redistributions in binary form must reproduce the above copyright
12103 + * notice, this list of conditions and the following disclaimer in the
12104 + * documentation and/or other materials provided with the distribution.
12105 + * 3. The names of the above-listed copyright holders may not be used
12106 + * to endorse or promote products derived from this software without
12107 + * specific prior written permission.
12109 + * ALTERNATIVELY, this software may be distributed under the terms of the
12110 + * GNU General Public License ("GPL") version 2, as published by the Free
12111 + * Software Foundation.
12113 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
12114 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
12115 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
12116 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
12117 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
12118 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
12119 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
12120 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
12121 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
12122 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
12123 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
12126 +#ifndef VCHIQ_PAGELIST_H
12127 +#define VCHIQ_PAGELIST_H
12130 +#define PAGE_SIZE 4096
12132 +#define CACHE_LINE_SIZE 32
12133 +#define PAGELIST_WRITE 0
12134 +#define PAGELIST_READ 1
12135 +#define PAGELIST_READ_WITH_FRAGMENTS 2
12137 +typedef struct pagelist_struct {
12138 + unsigned long length;
12139 + unsigned short type;
12140 + unsigned short offset;
12141 + unsigned long addrs[1]; /* N.B. 12 LSBs hold the number of following
12142 + pages at consecutive addresses. */
12145 +typedef struct fragments_struct {
12146 + char headbuf[CACHE_LINE_SIZE];
12147 + char tailbuf[CACHE_LINE_SIZE];
12150 +#endif /* VCHIQ_PAGELIST_H */
12151 diff -urN linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_proc.c linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_proc.c
12152 --- linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_proc.c 1970-01-01 01:00:00.000000000 +0100
12153 +++ linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_proc.c 2013-07-06 15:25:50.000000000 +0100
12156 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
12158 + * Redistribution and use in source and binary forms, with or without
12159 + * modification, are permitted provided that the following conditions
12161 + * 1. Redistributions of source code must retain the above copyright
12162 + * notice, this list of conditions, and the following disclaimer,
12163 + * without modification.
12164 + * 2. Redistributions in binary form must reproduce the above copyright
12165 + * notice, this list of conditions and the following disclaimer in the
12166 + * documentation and/or other materials provided with the distribution.
12167 + * 3. The names of the above-listed copyright holders may not be used
12168 + * to endorse or promote products derived from this software without
12169 + * specific prior written permission.
12171 + * ALTERNATIVELY, this software may be distributed under the terms of the
12172 + * GNU General Public License ("GPL") version 2, as published by the Free
12173 + * Software Foundation.
12175 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
12176 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
12177 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
12178 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
12179 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
12180 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
12181 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
12182 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
12183 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
12184 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
12185 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
12189 +#include <linux/proc_fs.h>
12190 +#include "vchiq_core.h"
12191 +#include "vchiq_arm.h"
12193 +struct vchiq_proc_info {
12194 + /* Global 'vc' proc entry used by all instances */
12195 + struct proc_dir_entry *vc_cfg_dir;
12197 + /* one entry per client process */
12198 + struct proc_dir_entry *clients;
12200 + /* log categories */
12201 + struct proc_dir_entry *log_categories;
12204 +static struct vchiq_proc_info proc_info;
12206 +struct proc_dir_entry *vchiq_proc_top(void)
12208 + BUG_ON(proc_info.vc_cfg_dir == NULL);
12209 + return proc_info.vc_cfg_dir;
12212 +/****************************************************************************
12214 +* log category entries
12216 +***************************************************************************/
12217 +#define PROC_WRITE_BUF_SIZE 256
12219 +#define VCHIQ_LOG_ERROR_STR "error"
12220 +#define VCHIQ_LOG_WARNING_STR "warning"
12221 +#define VCHIQ_LOG_INFO_STR "info"
12222 +#define VCHIQ_LOG_TRACE_STR "trace"
12224 +static int log_cfg_read(char *buffer,
12232 + char *log_value = NULL;
12234 + switch (*((int *)data)) {
12235 + case VCHIQ_LOG_ERROR:
12236 + log_value = VCHIQ_LOG_ERROR_STR;
12238 + case VCHIQ_LOG_WARNING:
12239 + log_value = VCHIQ_LOG_WARNING_STR;
12241 + case VCHIQ_LOG_INFO:
12242 + log_value = VCHIQ_LOG_INFO_STR;
12244 + case VCHIQ_LOG_TRACE:
12245 + log_value = VCHIQ_LOG_TRACE_STR;
12251 + len += sprintf(buffer + len,
12253 + log_value ? log_value : "(null)");
12259 +static int log_cfg_write(struct file *file,
12260 + const char __user *buffer,
12261 + unsigned long count,
12264 + int *log_module = data;
12265 + char kbuf[PROC_WRITE_BUF_SIZE + 1];
12269 + memset(kbuf, 0, PROC_WRITE_BUF_SIZE + 1);
12270 + if (count >= PROC_WRITE_BUF_SIZE)
12271 + count = PROC_WRITE_BUF_SIZE;
12273 + if (copy_from_user(kbuf,
12277 + kbuf[count - 1] = 0;
12279 + if (strncmp("error", kbuf, strlen("error")) == 0)
12280 + *log_module = VCHIQ_LOG_ERROR;
12281 + else if (strncmp("warning", kbuf, strlen("warning")) == 0)
12282 + *log_module = VCHIQ_LOG_WARNING;
12283 + else if (strncmp("info", kbuf, strlen("info")) == 0)
12284 + *log_module = VCHIQ_LOG_INFO;
12285 + else if (strncmp("trace", kbuf, strlen("trace")) == 0)
12286 + *log_module = VCHIQ_LOG_TRACE;
12288 + *log_module = VCHIQ_LOG_DEFAULT;
12293 +/* Log category proc entries */
12294 +struct vchiq_proc_log_entry {
12295 + const char *name;
12297 + struct proc_dir_entry *dir;
12300 +static struct vchiq_proc_log_entry vchiq_proc_log_entries[] = {
12301 + { "core", &vchiq_core_log_level },
12302 + { "msg", &vchiq_core_msg_log_level },
12303 + { "sync", &vchiq_sync_log_level },
12304 + { "susp", &vchiq_susp_log_level },
12305 + { "arm", &vchiq_arm_log_level },
12307 +static int n_log_entries =
12308 + sizeof(vchiq_proc_log_entries)/sizeof(vchiq_proc_log_entries[0]);
12310 +/* create an entry under /proc/vc/log for each log category */
12311 +static int vchiq_proc_create_log_entries(struct proc_dir_entry *top)
12313 + struct proc_dir_entry *dir;
12317 + dir = proc_mkdir("log", proc_info.vc_cfg_dir);
12320 + proc_info.log_categories = dir;
12322 + for (i = 0; i < n_log_entries; i++) {
12323 + dir = create_proc_entry(vchiq_proc_log_entries[i].name,
12325 + proc_info.log_categories);
12331 + dir->read_proc = &log_cfg_read;
12332 + dir->write_proc = &log_cfg_write;
12333 + dir->data = (void *)vchiq_proc_log_entries[i].plevel;
12335 + vchiq_proc_log_entries[i].dir = dir;
12342 +int vchiq_proc_init(void)
12344 + BUG_ON(proc_info.vc_cfg_dir != NULL);
12346 + proc_info.vc_cfg_dir = proc_mkdir("vc", NULL);
12347 + if (proc_info.vc_cfg_dir == NULL)
12350 + proc_info.clients = proc_mkdir("clients",
12351 + proc_info.vc_cfg_dir);
12352 + if (!proc_info.clients)
12355 + if (vchiq_proc_create_log_entries(proc_info.vc_cfg_dir) != 0)
12361 + vchiq_proc_deinit();
12362 + vchiq_log_error(vchiq_arm_log_level,
12363 + "%s: failed to create proc directory",
12369 +/* remove all the proc entries */
12370 +void vchiq_proc_deinit(void)
12372 + /* log category entries */
12374 + if (proc_info.log_categories) {
12376 + for (i = 0; i < n_log_entries; i++)
12377 + if (vchiq_proc_log_entries[i].dir)
12378 + remove_proc_entry(
12379 + vchiq_proc_log_entries[i].name,
12380 + proc_info.log_categories);
12382 + remove_proc_entry(proc_info.log_categories->name,
12383 + proc_info.vc_cfg_dir);
12385 + if (proc_info.clients)
12386 + remove_proc_entry(proc_info.clients->name,
12387 + proc_info.vc_cfg_dir);
12388 + if (proc_info.vc_cfg_dir)
12389 + remove_proc_entry(proc_info.vc_cfg_dir->name, NULL);
12393 +struct proc_dir_entry *vchiq_clients_top(void)
12395 + return proc_info.clients;
12398 diff -urN linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_shim.c linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_shim.c
12399 --- linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_shim.c 1970-01-01 01:00:00.000000000 +0100
12400 +++ linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_shim.c 2013-07-06 15:25:50.000000000 +0100
12403 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
12405 + * Redistribution and use in source and binary forms, with or without
12406 + * modification, are permitted provided that the following conditions
12408 + * 1. Redistributions of source code must retain the above copyright
12409 + * notice, this list of conditions, and the following disclaimer,
12410 + * without modification.
12411 + * 2. Redistributions in binary form must reproduce the above copyright
12412 + * notice, this list of conditions and the following disclaimer in the
12413 + * documentation and/or other materials provided with the distribution.
12414 + * 3. The names of the above-listed copyright holders may not be used
12415 + * to endorse or promote products derived from this software without
12416 + * specific prior written permission.
12418 + * ALTERNATIVELY, this software may be distributed under the terms of the
12419 + * GNU General Public License ("GPL") version 2, as published by the Free
12420 + * Software Foundation.
12422 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
12423 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
12424 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
12425 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
12426 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
12427 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
12428 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
12429 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
12430 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
12431 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
12432 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
12434 +#include <linux/module.h>
12435 +#include <linux/types.h>
12437 +#include "interface/vchi/vchi.h"
12438 +#include "vchiq.h"
12439 +#include "vchiq_core.h"
12441 +#include "vchiq_util.h"
12443 +#include <stddef.h>
12445 +#define vchiq_status_to_vchi(status) ((int32_t)status)
12448 + VCHIQ_SERVICE_HANDLE_T handle;
12450 + VCHIU_QUEUE_T queue;
12452 + VCHI_CALLBACK_T callback;
12453 + void *callback_param;
12456 +/* ----------------------------------------------------------------------
12457 + * return pointer to the mphi message driver function table
12458 + * -------------------------------------------------------------------- */
12459 +const VCHI_MESSAGE_DRIVER_T *
12460 +vchi_mphi_message_driver_func_table(void)
12465 +/* ----------------------------------------------------------------------
12466 + * return a pointer to the 'single' connection driver fops
12467 + * -------------------------------------------------------------------- */
12468 +const VCHI_CONNECTION_API_T *
12469 +single_get_func_table(void)
12474 +VCHI_CONNECTION_T *vchi_create_connection(
12475 + const VCHI_CONNECTION_API_T *function_table,
12476 + const VCHI_MESSAGE_DRIVER_T *low_level)
12478 + (void)function_table;
12483 +/***********************************************************
12484 + * Name: vchi_msg_peek
12486 + * Arguments: const VCHI_SERVICE_HANDLE_T handle,
12488 + * uint32_t *msg_size,
12491 + * VCHI_FLAGS_T flags
12493 + * Description: Routine to return a pointer to the current message (to allow in
12494 + * place processing). The message can be removed using
12495 + * vchi_msg_remove when you're finished
12497 + * Returns: int32_t - success == 0
12499 + ***********************************************************/
12500 +int32_t vchi_msg_peek(VCHI_SERVICE_HANDLE_T handle,
12502 + uint32_t *msg_size,
12503 + VCHI_FLAGS_T flags)
12505 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12506 + VCHIQ_HEADER_T *header;
12508 + WARN_ON((flags != VCHI_FLAGS_NONE) &&
12509 + (flags != VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE));
12511 + if (flags == VCHI_FLAGS_NONE)
12512 + if (vchiu_queue_is_empty(&service->queue))
12515 + header = vchiu_queue_peek(&service->queue);
12517 + *data = header->data;
12518 + *msg_size = header->size;
12522 +EXPORT_SYMBOL(vchi_msg_peek);
12524 +/***********************************************************
12525 + * Name: vchi_msg_remove
12527 + * Arguments: const VCHI_SERVICE_HANDLE_T handle,
12529 + * Description: Routine to remove a message (after it has been read with
12532 + * Returns: int32_t - success == 0
12534 + ***********************************************************/
12535 +int32_t vchi_msg_remove(VCHI_SERVICE_HANDLE_T handle)
12537 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12538 + VCHIQ_HEADER_T *header;
12540 + header = vchiu_queue_pop(&service->queue);
12542 + vchiq_release_message(service->handle, header);
12546 +EXPORT_SYMBOL(vchi_msg_remove);
12548 +/***********************************************************
12549 + * Name: vchi_msg_queue
12551 + * Arguments: VCHI_SERVICE_HANDLE_T handle,
12552 + * const void *data,
12553 + * uint32_t data_size,
12554 + * VCHI_FLAGS_T flags,
12555 + * void *msg_handle,
12557 + * Description: Thin wrapper to queue a message onto a connection
12559 + * Returns: int32_t - success == 0
12561 + ***********************************************************/
12562 +int32_t vchi_msg_queue(VCHI_SERVICE_HANDLE_T handle,
12563 + const void *data,
12564 + uint32_t data_size,
12565 + VCHI_FLAGS_T flags,
12566 + void *msg_handle)
12568 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12569 + VCHIQ_ELEMENT_T element = {data, data_size};
12570 + VCHIQ_STATUS_T status;
12572 + (void)msg_handle;
12574 + WARN_ON(flags != VCHI_FLAGS_BLOCK_UNTIL_QUEUED);
12576 + status = vchiq_queue_message(service->handle, &element, 1);
12578 + /* vchiq_queue_message() may return VCHIQ_RETRY, so we need to
12579 + ** implement a retry mechanism since this function is supposed
12580 + ** to block until queued
12582 + while (status == VCHIQ_RETRY) {
12584 + status = vchiq_queue_message(service->handle, &element, 1);
12587 + return vchiq_status_to_vchi(status);
12589 +EXPORT_SYMBOL(vchi_msg_queue);
12591 +/***********************************************************
12592 + * Name: vchi_bulk_queue_receive
12594 + * Arguments: VCHI_BULK_HANDLE_T handle,
12595 + * void *data_dst,
12596 + * const uint32_t data_size,
12597 + * VCHI_FLAGS_T flags
12598 + * void *bulk_handle
12600 + * Description: Routine to setup a rcv buffer
12602 + * Returns: int32_t - success == 0
12604 + ***********************************************************/
12605 +int32_t vchi_bulk_queue_receive(VCHI_SERVICE_HANDLE_T handle,
12607 + uint32_t data_size,
12608 + VCHI_FLAGS_T flags,
12609 + void *bulk_handle)
12611 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12612 + VCHIQ_BULK_MODE_T mode;
12613 + VCHIQ_STATUS_T status;
12615 + switch ((int)flags) {
12616 + case VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE
12617 + | VCHI_FLAGS_BLOCK_UNTIL_QUEUED:
12618 + WARN_ON(!service->callback);
12619 + mode = VCHIQ_BULK_MODE_CALLBACK;
12621 + case VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE:
12622 + mode = VCHIQ_BULK_MODE_BLOCKING;
12624 + case VCHI_FLAGS_BLOCK_UNTIL_QUEUED:
12625 + case VCHI_FLAGS_NONE:
12626 + mode = VCHIQ_BULK_MODE_NOCALLBACK;
12629 + WARN(1, "unsupported message\n");
12630 + return vchiq_status_to_vchi(VCHIQ_ERROR);
12633 + status = vchiq_bulk_receive(service->handle, data_dst, data_size,
12634 + bulk_handle, mode);
12636 + /* vchiq_bulk_receive() may return VCHIQ_RETRY, so we need to
12637 + ** implement a retry mechanism since this function is supposed
12638 + ** to block until queued
12640 + while (status == VCHIQ_RETRY) {
12642 + status = vchiq_bulk_receive(service->handle, data_dst,
12643 + data_size, bulk_handle, mode);
12646 + return vchiq_status_to_vchi(status);
12648 +EXPORT_SYMBOL(vchi_bulk_queue_receive);
12650 +/***********************************************************
12651 + * Name: vchi_bulk_queue_transmit
12653 + * Arguments: VCHI_BULK_HANDLE_T handle,
12654 + * const void *data_src,
12655 + * uint32_t data_size,
12656 + * VCHI_FLAGS_T flags,
12657 + * void *bulk_handle
12659 + * Description: Routine to transmit some data
12661 + * Returns: int32_t - success == 0
12663 + ***********************************************************/
12664 +int32_t vchi_bulk_queue_transmit(VCHI_SERVICE_HANDLE_T handle,
12665 + const void *data_src,
12666 + uint32_t data_size,
12667 + VCHI_FLAGS_T flags,
12668 + void *bulk_handle)
12670 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12671 + VCHIQ_BULK_MODE_T mode;
12672 + VCHIQ_STATUS_T status;
12674 + switch ((int)flags) {
12675 + case VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE
12676 + | VCHI_FLAGS_BLOCK_UNTIL_QUEUED:
12677 + WARN_ON(!service->callback);
12678 + mode = VCHIQ_BULK_MODE_CALLBACK;
12680 + case VCHI_FLAGS_BLOCK_UNTIL_DATA_READ:
12681 + case VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE:
12682 + mode = VCHIQ_BULK_MODE_BLOCKING;
12684 + case VCHI_FLAGS_BLOCK_UNTIL_QUEUED:
12685 + case VCHI_FLAGS_NONE:
12686 + mode = VCHIQ_BULK_MODE_NOCALLBACK;
12689 + WARN(1, "unsupported message\n");
12690 + return vchiq_status_to_vchi(VCHIQ_ERROR);
12693 + status = vchiq_bulk_transmit(service->handle, data_src, data_size,
12694 + bulk_handle, mode);
12696 + /* vchiq_bulk_transmit() may return VCHIQ_RETRY, so we need to
12697 + ** implement a retry mechanism since this function is supposed
12698 + ** to block until queued
12700 + while (status == VCHIQ_RETRY) {
12702 + status = vchiq_bulk_transmit(service->handle, data_src,
12703 + data_size, bulk_handle, mode);
12706 + return vchiq_status_to_vchi(status);
12708 +EXPORT_SYMBOL(vchi_bulk_queue_transmit);
12710 +/***********************************************************
12711 + * Name: vchi_msg_dequeue
12713 + * Arguments: VCHI_SERVICE_HANDLE_T handle,
12715 + * uint32_t max_data_size_to_read,
12716 + * uint32_t *actual_msg_size
12717 + * VCHI_FLAGS_T flags
12719 + * Description: Routine to dequeue a message into the supplied buffer
12721 + * Returns: int32_t - success == 0
12723 + ***********************************************************/
12724 +int32_t vchi_msg_dequeue(VCHI_SERVICE_HANDLE_T handle,
12726 + uint32_t max_data_size_to_read,
12727 + uint32_t *actual_msg_size,
12728 + VCHI_FLAGS_T flags)
12730 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12731 + VCHIQ_HEADER_T *header;
12733 + WARN_ON((flags != VCHI_FLAGS_NONE) &&
12734 + (flags != VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE));
12736 + if (flags == VCHI_FLAGS_NONE)
12737 + if (vchiu_queue_is_empty(&service->queue))
12740 + header = vchiu_queue_pop(&service->queue);
12742 + memcpy(data, header->data, header->size < max_data_size_to_read ?
12743 + header->size : max_data_size_to_read);
12745 + *actual_msg_size = header->size;
12747 + vchiq_release_message(service->handle, header);
12751 +EXPORT_SYMBOL(vchi_msg_dequeue);
12753 +/***********************************************************
12754 + * Name: vchi_msg_queuev
12756 + * Arguments: VCHI_SERVICE_HANDLE_T handle,
12757 + * VCHI_MSG_VECTOR_T *vector,
12758 + * uint32_t count,
12759 + * VCHI_FLAGS_T flags,
12760 + * void *msg_handle
12762 + * Description: Thin wrapper to queue a message onto a connection
12764 + * Returns: int32_t - success == 0
12766 + ***********************************************************/
12768 +vchiq_static_assert(sizeof(VCHI_MSG_VECTOR_T) == sizeof(VCHIQ_ELEMENT_T));
12769 +vchiq_static_assert(offsetof(VCHI_MSG_VECTOR_T, vec_base) ==
12770 + offsetof(VCHIQ_ELEMENT_T, data));
12771 +vchiq_static_assert(offsetof(VCHI_MSG_VECTOR_T, vec_len) ==
12772 + offsetof(VCHIQ_ELEMENT_T, size));
12774 +int32_t vchi_msg_queuev(VCHI_SERVICE_HANDLE_T handle,
12775 + VCHI_MSG_VECTOR_T *vector,
12777 + VCHI_FLAGS_T flags,
12778 + void *msg_handle)
12780 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12782 + (void)msg_handle;
12784 + WARN_ON(flags != VCHI_FLAGS_BLOCK_UNTIL_QUEUED);
12786 + return vchiq_status_to_vchi(vchiq_queue_message(service->handle,
12787 + (const VCHIQ_ELEMENT_T *)vector, count));
12789 +EXPORT_SYMBOL(vchi_msg_queuev);
12791 +/***********************************************************
12792 + * Name: vchi_held_msg_release
12794 + * Arguments: VCHI_HELD_MSG_T *message
12796 + * Description: Routine to release a held message (after it has been read with
12799 + * Returns: int32_t - success == 0
12801 + ***********************************************************/
12802 +int32_t vchi_held_msg_release(VCHI_HELD_MSG_T *message)
12804 + vchiq_release_message((VCHIQ_SERVICE_HANDLE_T)message->service,
12805 + (VCHIQ_HEADER_T *)message->message);
12810 +/***********************************************************
12811 + * Name: vchi_msg_hold
12813 + * Arguments: VCHI_SERVICE_HANDLE_T handle,
12815 + * uint32_t *msg_size,
12816 + * VCHI_FLAGS_T flags,
12817 + * VCHI_HELD_MSG_T *message_handle
12819 + * Description: Routine to return a pointer to the current message (to allow
12820 + * in place processing). The message is dequeued - don't forget
12821 + * to release the message using vchi_held_msg_release when you're
12824 + * Returns: int32_t - success == 0
12826 + ***********************************************************/
12827 +int32_t vchi_msg_hold(VCHI_SERVICE_HANDLE_T handle,
12829 + uint32_t *msg_size,
12830 + VCHI_FLAGS_T flags,
12831 + VCHI_HELD_MSG_T *message_handle)
12833 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12834 + VCHIQ_HEADER_T *header;
12836 + WARN_ON((flags != VCHI_FLAGS_NONE) &&
12837 + (flags != VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE));
12839 + if (flags == VCHI_FLAGS_NONE)
12840 + if (vchiu_queue_is_empty(&service->queue))
12843 + header = vchiu_queue_pop(&service->queue);
12845 + *data = header->data;
12846 + *msg_size = header->size;
12848 + message_handle->service =
12849 + (struct opaque_vchi_service_t *)service->handle;
12850 + message_handle->message = header;
12855 +/***********************************************************
12856 + * Name: vchi_initialise
12858 + * Arguments: VCHI_INSTANCE_T *instance_handle
12859 + * VCHI_CONNECTION_T **connections
12860 + * const uint32_t num_connections
12862 + * Description: Initialises the hardware but does not transmit anything
12863 + * When run as a Host App this will be called twice hence the need
12864 + * to malloc the state information
12866 + * Returns: 0 if successful, failure otherwise
12868 + ***********************************************************/
12870 +int32_t vchi_initialise(VCHI_INSTANCE_T *instance_handle)
12872 + VCHIQ_INSTANCE_T instance;
12873 + VCHIQ_STATUS_T status;
12875 + status = vchiq_initialise(&instance);
12877 + *instance_handle = (VCHI_INSTANCE_T)instance;
12879 + return vchiq_status_to_vchi(status);
12881 +EXPORT_SYMBOL(vchi_initialise);
12883 +/***********************************************************
12884 + * Name: vchi_connect
12886 + * Arguments: VCHI_CONNECTION_T **connections
12887 + * const uint32_t num_connections
12888 + * VCHI_INSTANCE_T instance_handle)
12890 + * Description: Starts the command service on each connection,
12891 + * causing INIT messages to be pinged back and forth
12893 + * Returns: 0 if successful, failure otherwise
12895 + ***********************************************************/
12896 +int32_t vchi_connect(VCHI_CONNECTION_T **connections,
12897 + const uint32_t num_connections,
12898 + VCHI_INSTANCE_T instance_handle)
12900 + VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
12902 + (void)connections;
12903 + (void)num_connections;
12905 + return vchiq_connect(instance);
12907 +EXPORT_SYMBOL(vchi_connect);
12910 +/***********************************************************
12911 + * Name: vchi_disconnect
12913 + * Arguments: VCHI_INSTANCE_T instance_handle
12915 + * Description: Stops the command service on each connection,
12916 + * causing DE-INIT messages to be pinged back and forth
12918 + * Returns: 0 if successful, failure otherwise
12920 + ***********************************************************/
12921 +int32_t vchi_disconnect(VCHI_INSTANCE_T instance_handle)
12923 + VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
12924 + return vchiq_status_to_vchi(vchiq_shutdown(instance));
12926 +EXPORT_SYMBOL(vchi_disconnect);
12929 +/***********************************************************
12930 + * Name: vchi_service_open
12931 + * Name: vchi_service_create
12933 + * Arguments: VCHI_INSTANCE_T *instance_handle
12934 + * SERVICE_CREATION_T *setup,
12935 + * VCHI_SERVICE_HANDLE_T *handle
12937 + * Description: Routine to open a service
12939 + * Returns: int32_t - success == 0
12941 + ***********************************************************/
12943 +static VCHIQ_STATUS_T shim_callback(VCHIQ_REASON_T reason,
12944 + VCHIQ_HEADER_T *header, VCHIQ_SERVICE_HANDLE_T handle, void *bulk_user)
12946 + SHIM_SERVICE_T *service =
12947 + (SHIM_SERVICE_T *)VCHIQ_GET_SERVICE_USERDATA(handle);
12949 + switch (reason) {
12950 + case VCHIQ_MESSAGE_AVAILABLE:
12951 + vchiu_queue_push(&service->queue, header);
12953 + if (service->callback)
12954 + service->callback(service->callback_param,
12955 + VCHI_CALLBACK_MSG_AVAILABLE, NULL);
12957 + case VCHIQ_BULK_TRANSMIT_DONE:
12958 + if (service->callback)
12959 + service->callback(service->callback_param,
12960 + VCHI_CALLBACK_BULK_SENT, bulk_user);
12962 + case VCHIQ_BULK_RECEIVE_DONE:
12963 + if (service->callback)
12964 + service->callback(service->callback_param,
12965 + VCHI_CALLBACK_BULK_RECEIVED, bulk_user);
12967 + case VCHIQ_SERVICE_CLOSED:
12968 + if (service->callback)
12969 + service->callback(service->callback_param,
12970 + VCHI_CALLBACK_SERVICE_CLOSED, NULL);
12972 + case VCHIQ_SERVICE_OPENED:
12973 + /* No equivalent VCHI reason */
12975 + case VCHIQ_BULK_TRANSMIT_ABORTED:
12976 + if (service->callback)
12977 + service->callback(service->callback_param,
12978 + VCHI_CALLBACK_BULK_TRANSMIT_ABORTED, bulk_user);
12980 + case VCHIQ_BULK_RECEIVE_ABORTED:
12981 + if (service->callback)
12982 + service->callback(service->callback_param,
12983 + VCHI_CALLBACK_BULK_RECEIVE_ABORTED, bulk_user);
12986 + WARN(1, "not supported\n");
12990 + return VCHIQ_SUCCESS;
12993 +static SHIM_SERVICE_T *service_alloc(VCHIQ_INSTANCE_T instance,
12994 + SERVICE_CREATION_T *setup)
12996 + SHIM_SERVICE_T *service = kzalloc(sizeof(SHIM_SERVICE_T), GFP_KERNEL);
13001 + if (vchiu_queue_init(&service->queue, 64)) {
13002 + service->callback = setup->callback;
13003 + service->callback_param = setup->callback_param;
13013 +static void service_free(SHIM_SERVICE_T *service)
13016 + vchiu_queue_delete(&service->queue);
13021 +int32_t vchi_service_open(VCHI_INSTANCE_T instance_handle,
13022 + SERVICE_CREATION_T *setup,
13023 + VCHI_SERVICE_HANDLE_T *handle)
13025 + VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
13026 + SHIM_SERVICE_T *service = service_alloc(instance, setup);
13028 + VCHIQ_SERVICE_PARAMS_T params;
13029 + VCHIQ_STATUS_T status;
13031 + memset(&params, 0, sizeof(params));
13032 + params.fourcc = setup->service_id;
13033 + params.callback = shim_callback;
13034 + params.userdata = service;
13035 + params.version = setup->version.version;
13036 + params.version_min = setup->version.version_min;
13038 + status = vchiq_open_service(instance, &params,
13039 + &service->handle);
13040 + if (status != VCHIQ_SUCCESS) {
13041 + service_free(service);
13046 + *handle = (VCHI_SERVICE_HANDLE_T)service;
13048 + return (service != NULL) ? 0 : -1;
13050 +EXPORT_SYMBOL(vchi_service_open);
13052 +int32_t vchi_service_create(VCHI_INSTANCE_T instance_handle,
13053 + SERVICE_CREATION_T *setup,
13054 + VCHI_SERVICE_HANDLE_T *handle)
13056 + VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
13057 + SHIM_SERVICE_T *service = service_alloc(instance, setup);
13059 + VCHIQ_SERVICE_PARAMS_T params;
13060 + VCHIQ_STATUS_T status;
13062 + memset(&params, 0, sizeof(params));
13063 + params.fourcc = setup->service_id;
13064 + params.callback = shim_callback;
13065 + params.userdata = service;
13066 + params.version = setup->version.version;
13067 + params.version_min = setup->version.version_min;
13068 + status = vchiq_add_service(instance, &params, &service->handle);
13070 + if (status != VCHIQ_SUCCESS) {
13071 + service_free(service);
13076 + *handle = (VCHI_SERVICE_HANDLE_T)service;
13078 + return (service != NULL) ? 0 : -1;
13080 +EXPORT_SYMBOL(vchi_service_create);
13082 +int32_t vchi_service_close(const VCHI_SERVICE_HANDLE_T handle)
13084 + int32_t ret = -1;
13085 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
13087 + VCHIQ_STATUS_T status = vchiq_close_service(service->handle);
13088 + if (status == VCHIQ_SUCCESS) {
13089 + service_free(service);
13093 + ret = vchiq_status_to_vchi(status);
13097 +EXPORT_SYMBOL(vchi_service_close);
13099 +int32_t vchi_service_destroy(const VCHI_SERVICE_HANDLE_T handle)
13101 + int32_t ret = -1;
13102 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
13104 + VCHIQ_STATUS_T status = vchiq_remove_service(service->handle);
13105 + if (status == VCHIQ_SUCCESS) {
13106 + service_free(service);
13110 + ret = vchiq_status_to_vchi(status);
13114 +EXPORT_SYMBOL(vchi_service_destroy);
13116 +int32_t vchi_get_peer_version( const VCHI_SERVICE_HANDLE_T handle, short *peer_version )
13118 + int32_t ret = -1;
13119 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
13122 + VCHIQ_STATUS_T status = vchiq_get_peer_version(service->handle, peer_version);
13123 + ret = vchiq_status_to_vchi( status );
13127 +EXPORT_SYMBOL(vchi_get_peer_version);
13129 +/* ----------------------------------------------------------------------
13130 + * read a uint32_t from buffer.
13131 + * network format is defined to be little endian
13132 + * -------------------------------------------------------------------- */
13134 +vchi_readbuf_uint32(const void *_ptr)
13136 + const unsigned char *ptr = _ptr;
13137 + return ptr[0] | (ptr[1] << 8) | (ptr[2] << 16) | (ptr[3] << 24);
13140 +/* ----------------------------------------------------------------------
13141 + * write a uint32_t to buffer.
13142 + * network format is defined to be little endian
13143 + * -------------------------------------------------------------------- */
13145 +vchi_writebuf_uint32(void *_ptr, uint32_t value)
13147 + unsigned char *ptr = _ptr;
13148 + ptr[0] = (unsigned char)((value >> 0) & 0xFF);
13149 + ptr[1] = (unsigned char)((value >> 8) & 0xFF);
13150 + ptr[2] = (unsigned char)((value >> 16) & 0xFF);
13151 + ptr[3] = (unsigned char)((value >> 24) & 0xFF);
13154 +/* ----------------------------------------------------------------------
13155 + * read a uint16_t from buffer.
13156 + * network format is defined to be little endian
13157 + * -------------------------------------------------------------------- */
13159 +vchi_readbuf_uint16(const void *_ptr)
13161 + const unsigned char *ptr = _ptr;
13162 + return ptr[0] | (ptr[1] << 8);
13165 +/* ----------------------------------------------------------------------
13166 + * write a uint16_t into the buffer.
13167 + * network format is defined to be little endian
13168 + * -------------------------------------------------------------------- */
13170 +vchi_writebuf_uint16(void *_ptr, uint16_t value)
13172 + unsigned char *ptr = _ptr;
13173 + ptr[0] = (value >> 0) & 0xFF;
13174 + ptr[1] = (value >> 8) & 0xFF;
13177 +/***********************************************************
13178 + * Name: vchi_service_use
13180 + * Arguments: const VCHI_SERVICE_HANDLE_T handle
13182 + * Description: Routine to increment refcount on a service
13186 + ***********************************************************/
13187 +int32_t vchi_service_use(const VCHI_SERVICE_HANDLE_T handle)
13189 + int32_t ret = -1;
13190 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
13192 + ret = vchiq_status_to_vchi(vchiq_use_service(service->handle));
13195 +EXPORT_SYMBOL(vchi_service_use);
13197 +/***********************************************************
13198 + * Name: vchi_service_release
13200 + * Arguments: const VCHI_SERVICE_HANDLE_T handle
13202 + * Description: Routine to decrement refcount on a service
13206 + ***********************************************************/
13207 +int32_t vchi_service_release(const VCHI_SERVICE_HANDLE_T handle)
13209 + int32_t ret = -1;
13210 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
13212 + ret = vchiq_status_to_vchi(
13213 + vchiq_release_service(service->handle));
13216 +EXPORT_SYMBOL(vchi_service_release);
13217 diff -urN linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.c linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.c
13218 --- linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.c 1970-01-01 01:00:00.000000000 +0100
13219 +++ linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.c 2013-07-06 15:25:50.000000000 +0100
13222 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
13224 + * Redistribution and use in source and binary forms, with or without
13225 + * modification, are permitted provided that the following conditions
13227 + * 1. Redistributions of source code must retain the above copyright
13228 + * notice, this list of conditions, and the following disclaimer,
13229 + * without modification.
13230 + * 2. Redistributions in binary form must reproduce the above copyright
13231 + * notice, this list of conditions and the following disclaimer in the
13232 + * documentation and/or other materials provided with the distribution.
13233 + * 3. The names of the above-listed copyright holders may not be used
13234 + * to endorse or promote products derived from this software without
13235 + * specific prior written permission.
13237 + * ALTERNATIVELY, this software may be distributed under the terms of the
13238 + * GNU General Public License ("GPL") version 2, as published by the Free
13239 + * Software Foundation.
13241 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
13242 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
13243 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
13244 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
13245 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
13246 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
13247 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
13248 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
13249 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
13250 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
13251 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
13254 +#include "vchiq_util.h"
13256 +static inline int is_pow2(int i)
13258 + return i && !(i & (i - 1));
13261 +int vchiu_queue_init(VCHIU_QUEUE_T *queue, int size)
13263 + WARN_ON(!is_pow2(size));
13265 + queue->size = size;
13267 + queue->write = 0;
13269 + sema_init(&queue->pop, 0);
13270 + sema_init(&queue->push, 0);
13272 + queue->storage = kzalloc(size * sizeof(VCHIQ_HEADER_T *), GFP_KERNEL);
13273 + if (queue->storage == NULL) {
13274 + vchiu_queue_delete(queue);
13280 +void vchiu_queue_delete(VCHIU_QUEUE_T *queue)
13282 + if (queue->storage != NULL)
13283 + kfree(queue->storage);
13286 +int vchiu_queue_is_empty(VCHIU_QUEUE_T *queue)
13288 + return queue->read == queue->write;
13291 +int vchiu_queue_is_full(VCHIU_QUEUE_T *queue)
13293 + return queue->write == queue->read + queue->size;
13296 +void vchiu_queue_push(VCHIU_QUEUE_T *queue, VCHIQ_HEADER_T *header)
13298 + while (queue->write == queue->read + queue->size) {
13299 + if (down_interruptible(&queue->pop) != 0) {
13300 + flush_signals(current);
13304 + queue->storage[queue->write & (queue->size - 1)] = header;
13308 + up(&queue->push);
13311 +VCHIQ_HEADER_T *vchiu_queue_peek(VCHIU_QUEUE_T *queue)
13313 + while (queue->write == queue->read) {
13314 + if (down_interruptible(&queue->push) != 0) {
13315 + flush_signals(current);
13319 + up(&queue->push); // We haven't removed anything from the queue.
13320 + return queue->storage[queue->read & (queue->size - 1)];
13323 +VCHIQ_HEADER_T *vchiu_queue_pop(VCHIU_QUEUE_T *queue)
13325 + VCHIQ_HEADER_T *header;
13327 + while (queue->write == queue->read) {
13328 + if (down_interruptible(&queue->push) != 0) {
13329 + flush_signals(current);
13333 + header = queue->storage[queue->read & (queue->size - 1)];
13341 diff -urN linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.h linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.h
13342 --- linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.h 1970-01-01 01:00:00.000000000 +0100
13343 +++ linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.h 2013-07-06 15:25:50.000000000 +0100
13346 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
13348 + * Redistribution and use in source and binary forms, with or without
13349 + * modification, are permitted provided that the following conditions
13351 + * 1. Redistributions of source code must retain the above copyright
13352 + * notice, this list of conditions, and the following disclaimer,
13353 + * without modification.
13354 + * 2. Redistributions in binary form must reproduce the above copyright
13355 + * notice, this list of conditions and the following disclaimer in the
13356 + * documentation and/or other materials provided with the distribution.
13357 + * 3. The names of the above-listed copyright holders may not be used
13358 + * to endorse or promote products derived from this software without
13359 + * specific prior written permission.
13361 + * ALTERNATIVELY, this software may be distributed under the terms of the
13362 + * GNU General Public License ("GPL") version 2, as published by the Free
13363 + * Software Foundation.
13365 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
13366 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
13367 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
13368 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
13369 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
13370 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
13371 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
13372 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
13373 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
13374 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
13375 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
13378 +#ifndef VCHIQ_UTIL_H
13379 +#define VCHIQ_UTIL_H
13381 +#include <linux/types.h>
13382 +#include <linux/semaphore.h>
13383 +#include <linux/mutex.h>
13384 +#include <linux/bitops.h>
13385 +#include <linux/kthread.h>
13386 +#include <linux/wait.h>
13387 +#include <linux/vmalloc.h>
13388 +#include <linux/jiffies.h>
13389 +#include <linux/delay.h>
13390 +#include <linux/string.h>
13391 +#include <linux/types.h>
13392 +#include <linux/interrupt.h>
13393 +#include <linux/random.h>
13394 +#include <linux/sched.h>
13395 +#include <linux/ctype.h>
13396 +#include <linux/uaccess.h>
13397 +#include <linux/time.h> /* for time_t */
13398 +#include <linux/slab.h>
13399 +#include <linux/vmalloc.h>
13401 +#include "vchiq_if.h"
13408 + struct semaphore pop;
13409 + struct semaphore push;
13411 + VCHIQ_HEADER_T **storage;
13414 +extern int vchiu_queue_init(VCHIU_QUEUE_T *queue, int size);
13415 +extern void vchiu_queue_delete(VCHIU_QUEUE_T *queue);
13417 +extern int vchiu_queue_is_empty(VCHIU_QUEUE_T *queue);
13418 +extern int vchiu_queue_is_full(VCHIU_QUEUE_T *queue);
13420 +extern void vchiu_queue_push(VCHIU_QUEUE_T *queue, VCHIQ_HEADER_T *header);
13422 +extern VCHIQ_HEADER_T *vchiu_queue_peek(VCHIU_QUEUE_T *queue);
13423 +extern VCHIQ_HEADER_T *vchiu_queue_pop(VCHIU_QUEUE_T *queue);
13427 diff -urN linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_version.c linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_version.c
13428 --- linux-3.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_version.c 1970-01-01 01:00:00.000000000 +0100
13429 +++ linux-rpi-3.10.y/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_version.c 2013-07-06 15:25:50.000000000 +0100
13432 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
13434 + * Redistribution and use in source and binary forms, with or without
13435 + * modification, are permitted provided that the following conditions
13437 + * 1. Redistributions of source code must retain the above copyright
13438 + * notice, this list of conditions, and the following disclaimer,
13439 + * without modification.
13440 + * 2. Redistributions in binary form must reproduce the above copyright
13441 + * notice, this list of conditions and the following disclaimer in the
13442 + * documentation and/or other materials provided with the distribution.
13443 + * 3. The names of the above-listed copyright holders may not be used
13444 + * to endorse or promote products derived from this software without
13445 + * specific prior written permission.
13447 + * ALTERNATIVELY, this software may be distributed under the terms of the
13448 + * GNU General Public License ("GPL") version 2, as published by the Free
13449 + * Software Foundation.
13451 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
13452 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
13453 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
13454 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
13455 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
13456 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
13457 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
13458 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
13459 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
13460 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
13461 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
13463 +#include "vchiq_build_info.h"
13464 +#include <linux/broadcom/vc_debug_sym.h>
13466 +VC_DEBUG_DECLARE_STRING_VAR( vchiq_build_hostname, "dc4-arm-01" );
13467 +VC_DEBUG_DECLARE_STRING_VAR( vchiq_build_version, "9245b4c35b99b3870e1f7dc598c5692b3c66a6f0 (tainted)" );
13468 +VC_DEBUG_DECLARE_STRING_VAR( vchiq_build_time, __TIME__ );
13469 +VC_DEBUG_DECLARE_STRING_VAR( vchiq_build_date, __DATE__ );
13471 +const char *vchiq_get_build_hostname( void )
13473 + return vchiq_build_hostname;
13476 +const char *vchiq_get_build_version( void )
13478 + return vchiq_build_version;
13481 +const char *vchiq_get_build_date( void )
13483 + return vchiq_build_date;
13486 +const char *vchiq_get_build_time( void )
13488 + return vchiq_build_time;
13490 diff -urN linux-3.10/drivers/misc/vc04_services/Kconfig linux-rpi-3.10.y/drivers/misc/vc04_services/Kconfig
13491 --- linux-3.10/drivers/misc/vc04_services/Kconfig 1970-01-01 01:00:00.000000000 +0100
13492 +++ linux-rpi-3.10.y/drivers/misc/vc04_services/Kconfig 2013-07-06 15:25:50.000000000 +0100
13494 +config BCM2708_VCHIQ
13495 + tristate "Videocore VCHIQ"
13496 + depends on MACH_BCM2708
13499 + Kernel to VideoCore communication interface for the
13500 + BCM2708 family of products.
13501 + Defaults to Y when the Broadcom Videocore services
13502 + are included in the build, N otherwise.
13504 diff -urN linux-3.10/drivers/misc/vc04_services/Makefile linux-rpi-3.10.y/drivers/misc/vc04_services/Makefile
13505 --- linux-3.10/drivers/misc/vc04_services/Makefile 1970-01-01 01:00:00.000000000 +0100
13506 +++ linux-rpi-3.10.y/drivers/misc/vc04_services/Makefile 2013-07-06 15:25:50.000000000 +0100
13508 +ifeq ($(CONFIG_MACH_BCM2708),y)
13510 +obj-$(CONFIG_BCM2708_VCHIQ) += vchiq.o
13513 + interface/vchiq_arm/vchiq_core.o \
13514 + interface/vchiq_arm/vchiq_arm.o \
13515 + interface/vchiq_arm/vchiq_kern_lib.o \
13516 + interface/vchiq_arm/vchiq_2835_arm.o \
13517 + interface/vchiq_arm/vchiq_proc.o \
13518 + interface/vchiq_arm/vchiq_shim.o \
13519 + interface/vchiq_arm/vchiq_util.o \
13520 + interface/vchiq_arm/vchiq_connected.o \
13522 +EXTRA_CFLAGS += -DVCOS_VERIFY_BKPTS=1 -Idrivers/misc/vc04_services -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000
13526 diff -urN linux-3.10/include/linux/broadcom/vc_cma.h linux-rpi-3.10.y/include/linux/broadcom/vc_cma.h
13527 --- linux-3.10/include/linux/broadcom/vc_cma.h 1970-01-01 01:00:00.000000000 +0100
13528 +++ linux-rpi-3.10.y/include/linux/broadcom/vc_cma.h 2013-07-06 15:25:50.000000000 +0100
13530 +/*****************************************************************************
13531 +* Copyright 2012 Broadcom Corporation. All rights reserved.
13533 +* Unless you and Broadcom execute a separate written software license
13534 +* agreement governing use of this software, this software is licensed to you
13535 +* under the terms of the GNU General Public License version 2, available at
13536 +* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
13538 +* Notwithstanding the above, under no circumstances may you combine this
13539 +* software in any way with any other Broadcom software provided under a
13540 +* license other than the GPL, without Broadcom's express prior written
13542 +*****************************************************************************/
13544 +#if !defined( VC_CMA_H )
13547 +#include <linux/ioctl.h>
13549 +#define VC_CMA_IOC_MAGIC 0xc5
13551 +#define VC_CMA_IOC_RESERVE _IO(VC_CMA_IOC_MAGIC, 0)
13554 +extern void __init vc_cma_early_init(void);
13555 +extern void __init vc_cma_reserve(void);
13558 +#endif /* VC_CMA_H */