Add ocf to 2.6.25 for ubsec ssb integration
target/linux/generic-2.6/patches-2.6.25/950-ocf-linux-26-20080704.patch
1 Index: linux-2.6.x/crypto/Kconfig
2 ===================================================================
3 RCS file: /cvs/sw/linux-2.6.x/crypto/Kconfig,v
4 retrieving revision 1.1.1.29
5 diff -u -r1.1.1.29 Kconfig
6 --- linux-2.6.x/crypto/Kconfig  10 Oct 2007 00:54:29 -0000      1.1.1.29
7 +++ linux-2.6.x/crypto/Kconfig  15 Dec 2007 11:08:08 -0000
8 @@ -471,3 +471,6 @@
9  source "drivers/crypto/Kconfig"
10  
11  endif  # if CRYPTO
12 +
13 +source "crypto/ocf/Kconfig"
14 +
15 Index: linux-2.6.x/crypto/Makefile
16 ===================================================================
17 RCS file: /cvs/sw/linux-2.6.x/crypto/Makefile,v
18 retrieving revision 1.1.1.23
19 diff -u -r1.1.1.23 Makefile
20 --- linux-2.6.x/crypto/Makefile 10 Oct 2007 00:54:29 -0000      1.1.1.23
21 +++ linux-2.6.x/crypto/Makefile 15 Dec 2007 11:08:08 -0000
22 @@ -51,6 +51,8 @@
23  
24  obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o
25  
26 +obj-$(CONFIG_OCF_OCF) += ocf/
27 +
28  #
29  # generic algorithms and the async_tx api
30  #
31 Index: linux-2.6.x/drivers/char/random.c
32 ===================================================================
33 RCS file: /cvs/sw/linux-2.6.x/drivers/char/random.c,v
34 retrieving revision 1.1.1.41
35 retrieving revision 1.6
36 diff -u -r1.1.1.41 -r1.6
37 --- linux-2.6.x/drivers/char/random.c   22 Apr 2008 01:36:57 -0000      1.1.1.41
38 +++ linux-2.6.x/drivers/char/random.c   22 Apr 2008 04:48:56 -0000      1.6
39 @@ -129,6 +129,9 @@
40   *                                unsigned int value);
41   *     void add_interrupt_randomness(int irq);
42   *
43 + *      void random_input_words(__u32 *buf, size_t wordcount, int ent_count)
44 + *      int random_input_wait(void);
45 + *
46   * add_input_randomness() uses the input layer interrupt timing, as well as
47   * the event type information from the hardware.
48   *
49 @@ -140,6 +143,13 @@
50   * a better measure, since the timing of the disk interrupts are more
51   * unpredictable.
52   *
53 + * random_input_words() just provides a raw block of entropy to the input
54 + * pool, such as from a hardware entropy generator.
55 + *
56 + * random_input_wait() suspends the caller until such time as the
57 + * entropy pool falls below the write threshold, and returns a count of how
58 + * much entropy (in bits) is needed to sustain the pool.
59 + *
60   * All of these routines try to estimate how many bits of randomness a
61   * particular randomness source.  They do this by keeping track of the
62   * first and second order deltas of the event timings.
63 @@ -669,6 +679,61 @@
64  }
65  #endif
66  
67 +/*
68 + * random_input_words - add bulk entropy to pool
69 + *
70 + * @buf: buffer to add
71 + * @wordcount: number of __u32 words to add
72 + * @ent_count: total amount of entropy (in bits) to credit
73 + *
74 + * this provides bulk input of entropy to the input pool
75 + *
76 + */
77 +void random_input_words(__u32 *buf, size_t wordcount, int ent_count)
78 +{
79 +       add_entropy_words(&input_pool, buf, wordcount);
80 +
81 +       credit_entropy_store(&input_pool, ent_count);
82 +
83 +       DEBUG_ENT("crediting %d bits => %d\n",
84 +                 ent_count, input_pool.entropy_count);
85 +       /*
86 +        * Wake up waiting processes if we have enough
87 +        * entropy.
88 +        */
89 +       if (input_pool.entropy_count >= random_read_wakeup_thresh)
90 +               wake_up_interruptible(&random_read_wait);
91 +}
92 +EXPORT_SYMBOL(random_input_words);
93 +
94 +/*
95 + * random_input_wait - wait until random needs entropy
96 + *
97 + * this function sleeps until the /dev/random subsystem actually
98 + * needs more entropy, and then returns the amount of entropy
99 + * that it would be nice to have added to the system.
100 + */
101 +int random_input_wait(void)
102 +{
103 +       int count;
104 +
105 +       wait_event_interruptible(random_write_wait, 
106 +                        input_pool.entropy_count < random_write_wakeup_thresh);
107 +
108 +       count = random_write_wakeup_thresh - input_pool.entropy_count;
109 +
110 +        /* likely we got woken up due to a signal */
111 +       if (count <= 0) count = random_read_wakeup_thresh; 
112 +
113 +       DEBUG_ENT("requesting %d bits from input_wait()er %d<%d\n",
114 +                 count,
115 +                 input_pool.entropy_count, random_write_wakeup_thresh);
116 +
117 +       return count;
118 +}
119 +EXPORT_SYMBOL(random_input_wait);
120 +
121 +
122  #define EXTRACT_SIZE 10
123  
124  /*********************************************************************
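The two helpers added to random.c above give a hardware RNG driver a bulk path into the input pool: random_input_wait() blocks until /dev/random wants more entropy, and random_input_words() pushes a buffer of words and credits the estimated bits. A minimal sketch of a consumer follows (hypothetical driver code, not part of this patch; my_hw_read_words() and the 8-bits-per-word credit are illustrative assumptions):

    /* sketch: harvest thread in a hypothetical hardware RNG driver */
    #include <linux/kernel.h>
    #include <linux/kthread.h>
    #include <linux/random.h>

    static int my_rng_harvest(void *arg)
    {
            __u32 buf[32];

            while (!kthread_should_stop()) {
                    /* sleep until the pool drops below the write threshold */
                    int wanted = random_input_wait();
                    int words  = min((wanted + 31) / 32, 32);

                    my_hw_read_words(buf, words);   /* hypothetical HW read */
                    /* credit only 8 bits per 32-bit word (a conservative estimate) */
                    random_input_words(buf, words, words * 8);
            }
            return 0;
    }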
125 Index: linux-2.6.x/fs/fcntl.c
126 ===================================================================
127 RCS file: /cvs/sw/linux-2.6.x/fs/fcntl.c,v
128 retrieving revision 1.1.1.39
129 retrieving revision 1.5
130 diff -u -r1.1.1.39 -r1.5
131 --- linux-2.6.x/fs/fcntl.c      22 Apr 2008 01:37:55 -0000      1.1.1.39
132 +++ linux-2.6.x/fs/fcntl.c      22 Apr 2008 04:49:02 -0000      1.5
133 @@ -202,6 +202,7 @@
134                 ret = dupfd(file, 0, 0);
135         return ret;
136  }
137 +EXPORT_SYMBOL(sys_dup);
138  
139  #define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | FASYNC | O_DIRECT | O_NOATIME)
140  
141 Index: linux-2.6.x/include/linux/miscdevice.h
142 ===================================================================
143 RCS file: /cvs/sw/linux-2.6.x/include/linux/miscdevice.h,v
144 retrieving revision 1.1.1.16
145 retrieving revision 1.8
146 diff -u -r1.1.1.16 -r1.8
147 --- linux-2.6.x/include/linux/miscdevice.h      22 Apr 2008 01:36:52 -0000      1.1.1.16
148 +++ linux-2.6.x/include/linux/miscdevice.h      22 Apr 2008 04:49:10 -0000      1.8
149 @@ -12,6 +12,7 @@
150  #define APOLLO_MOUSE_MINOR 7
151  #define PC110PAD_MINOR 9
152  /*#define ADB_MOUSE_MINOR 10   FIXME OBSOLETE */
153 +#define CRYPTODEV_MINOR                70      /* /dev/crypto */
154  #define WATCHDOG_MINOR         130     /* Watchdog timer     */
155  #define TEMP_MINOR             131     /* Temperature Sensor */
156  #define RTC_MINOR 135
157 Index: linux-2.6.x/include/linux/random.h
158 ===================================================================
159 RCS file: /cvs/sw/linux-2.6.x/include/linux/random.h,v
160 retrieving revision 1.1.1.12
161 retrieving revision 1.5
162 diff -u -r1.1.1.12 -r1.5
163 --- linux-2.6.x/include/linux/random.h  26 Apr 2007 11:16:52 -0000      1.1.1.12
164 +++ linux-2.6.x/include/linux/random.h  22 May 2008 03:31:38 -0000      1.5
165 @@ -8,6 +8,7 @@
166  #define _LINUX_RANDOM_H
167  
168  #include <linux/ioctl.h>
169 +#include <linux/types.h> /* for __u32 in user space */
170  
171  /* ioctl()'s for the random number generator */
172  
173 @@ -32,6 +33,30 @@
174  /* Clear the entropy pool and associated counters.  (Superuser only.) */
175  #define RNDCLEARPOOL   _IO( 'R', 0x06 )
176  
177 +#ifdef CONFIG_FIPS_RNG
178 +
179 +/* Size of seed value - equal to AES blocksize */
180 +#define AES_BLOCK_SIZE_BYTES   16
181 +#define SEED_SIZE_BYTES                        AES_BLOCK_SIZE_BYTES
182 +/* Size of AES key */
183 +#define KEY_SIZE_BYTES         16
184 +
185 +/* ioctl() structure used by FIPS 140-2 Tests */
186 +struct rand_fips_test {
187 +       unsigned char key[KEY_SIZE_BYTES];                      /* Input */
188 +       unsigned char datetime[SEED_SIZE_BYTES];        /* Input */
189 +       unsigned char seed[SEED_SIZE_BYTES];            /* Input */
190 +       unsigned char result[SEED_SIZE_BYTES];          /* Output */
191 +};
192 +
193 +/* FIPS 140-2 RNG Variable Seed Test. (Superuser only.) */
194 +#define RNDFIPSVST     _IOWR('R', 0x10, struct rand_fips_test)
195 +
196 +/* FIPS 140-2 RNG Monte Carlo Test. (Superuser only.) */
197 +#define RNDFIPSMCT     _IOWR('R', 0x11, struct rand_fips_test)
198 +
199 +#endif /* #ifdef CONFIG_FIPS_RNG */
200 +
201  struct rand_pool_info {
202         int     entropy_count;
203         int     buf_size;
204 @@ -48,6 +73,10 @@
205                                  unsigned int value);
206  extern void add_interrupt_randomness(int irq);
207  
208 +extern void random_input_words(__u32 *buf, size_t wordcount, int ent_count);
209 +extern int random_input_wait(void);
210 +#define HAS_RANDOM_INPUT_WAIT 1
211 +
212  extern void get_random_bytes(void *buf, int nbytes);
213  void generate_random_uuid(unsigned char uuid_out[16]);
214  
215 --- /dev/null   2007-07-04 13:54:27.000000000 +1000
216 +++ linux/crypto/ocf/hifn/Makefile      2007-07-25 11:02:33.000000000 +1000
217 @@ -0,0 +1,13 @@
218 +# for SGlinux builds
219 +-include $(ROOTDIR)/modules/.config
220 +
221 +obj-$(CONFIG_OCF_HIFN)     += hifn7751.o
222 +obj-$(CONFIG_OCF_HIFNHIPP) += hifnHIPP.o
223 +
224 +obj ?= .
225 +EXTRA_CFLAGS += -I$(obj)/.. -I$(obj)/
226 +
227 +ifdef TOPDIR
228 +-include $(TOPDIR)/Rules.make
229 +endif
230 +
231 --- /dev/null   2007-07-04 13:54:27.000000000 +1000
232 +++ linux/crypto/ocf/safe/Makefile      2007-07-25 11:02:33.000000000 +1000
233 @@ -0,0 +1,12 @@
234 +# for SGlinux builds
235 +-include $(ROOTDIR)/modules/.config
236 +
237 +obj-$(CONFIG_OCF_SAFE) += safe.o
238 +
239 +obj ?= .
240 +EXTRA_CFLAGS += -I$(obj)/.. -I$(obj)/
241 +
242 +ifdef TOPDIR
243 +-include $(TOPDIR)/Rules.make
244 +endif
245 +
246 --- /dev/null   2007-07-04 13:54:27.000000000 +1000
247 +++ linux/crypto/ocf/Makefile   2008-07-04 14:48:17.000000000 +1000
248 @@ -0,0 +1,120 @@
249 +# for SGlinux builds
250 +-include $(ROOTDIR)/modules/.config
251 +
252 +OCF_OBJS = crypto.o criov.o
253 +
254 +ifdef CONFIG_OCF_RANDOMHARVEST
255 +       OCF_OBJS += random.o
256 +endif
257 +
258 +ifdef CONFIG_OCF_FIPS
259 +       OCF_OBJS += rndtest.o
260 +endif
261 +
262 +# Add in autoconf.h to get #defines for CONFIG_xxx
263 +AUTOCONF_H=$(ROOTDIR)/modules/autoconf.h
264 +ifeq ($(AUTOCONF_H), $(wildcard $(AUTOCONF_H)))
265 +       EXTRA_CFLAGS += -include $(AUTOCONF_H)
266 +       export EXTRA_CFLAGS
267 +endif
268 +
269 +ifndef obj
270 +       obj ?= .
271 +       _obj = subdir
272 +       mod-subdirs := safe hifn ixp4xx talitos ocfnull
273 +       export-objs += crypto.o criov.o random.o
274 +       list-multi += ocf.o
275 +       _slash :=
276 +else
277 +       _obj = obj
278 +       _slash := /
279 +endif
280 +
281 +EXTRA_CFLAGS += -I$(obj)/.
282 +
283 +obj-$(CONFIG_OCF_OCF)         += ocf.o
284 +obj-$(CONFIG_OCF_CRYPTODEV)   += cryptodev.o
285 +obj-$(CONFIG_OCF_CRYPTOSOFT)  += cryptosoft.o
286 +obj-$(CONFIG_OCF_BENCH)       += ocf-bench.o
287 +
288 +$(_obj)-$(CONFIG_OCF_SAFE)    += safe$(_slash)
289 +$(_obj)-$(CONFIG_OCF_HIFN)    += hifn$(_slash)
290 +$(_obj)-$(CONFIG_OCF_IXP4XX)  += ixp4xx$(_slash)
291 +$(_obj)-$(CONFIG_OCF_TALITOS) += talitos$(_slash)
292 +$(_obj)-$(CONFIG_OCF_PASEMI)  += pasemi$(_slash)
293 +$(_obj)-$(CONFIG_OCF_OCFNULL) += ocfnull$(_slash)
294 +
295 +ocf-objs := $(OCF_OBJS)
296 +
297 +$(list-multi) dummy1: $(ocf-objs)
298 +       $(LD) -r -o $@ $(ocf-objs)
299 +
300 +.PHONY: clean
301 +clean:
302 +       rm -f *.o *.ko .*.o.flags .*.ko.cmd .*.o.cmd .*.mod.o.cmd *.mod.c
303 +       rm -f */*.o */*.ko */.*.o.cmd */.*.ko.cmd */.*.mod.o.cmd */*.mod.c */.*.o.flags
304 +
305 +ifdef TOPDIR
306 +-include $(TOPDIR)/Rules.make
307 +endif
308 +
309 +#
310 +# release gen targets
311 +#
312 +
313 +.PHONY: patch
314 +patch:
315 +       REL=`date +%Y%m%d`; \
316 +               patch=ocf-linux-$$REL.patch; \
317 +               patch24=ocf-linux-24-$$REL.patch; \
318 +               patch26=ocf-linux-26-$$REL.patch; \
319 +               ( \
320 +                       find . -name Makefile; \
321 +                       find . -name Config.in; \
322 +                       find . -name Kconfig; \
323 +                       find . -name README; \
324 +                       find . -name '*.[ch]' | grep -v '.mod.c'; \
325 +               ) | while read t; do \
326 +                       diff -Nau /dev/null $$t | sed 's?^+++ \./?+++ linux/crypto/ocf/?'; \
327 +               done > $$patch; \
328 +               cat patches/linux-2.4.35-ocf.patch $$patch > $$patch24; \
329 +               cat patches/linux-2.6.25-ocf.patch $$patch > $$patch26
330 +
331 +.PHONY: tarball
332 +tarball:
333 +       REL=`date +%Y%m%d`; RELDIR=/tmp/ocf-linux-$$REL; \
334 +               CURDIR=`pwd`; \
335 +               rm -rf /tmp/ocf-linux-$$REL*; \
336 +               mkdir -p $$RELDIR/tools; \
337 +               cp README* $$RELDIR; \
338 +               cp patches/openss*.patch $$RELDIR; \
339 +               cp patches/crypto-tools.patch $$RELDIR; \
340 +               cp tools/[!C]* $$RELDIR/tools; \
341 +               cd ..; \
342 +               tar cvf $$RELDIR/ocf-linux.tar \
343 +                                       --exclude=CVS \
344 +                                       --exclude=.* \
345 +                                       --exclude=*.o \
346 +                                       --exclude=*.ko \
347 +                                       --exclude=*.mod.* \
348 +                                       --exclude=README* \
349 +                                       --exclude=ocf-*.patch \
350 +                                       --exclude=ocf/patches/openss*.patch \
351 +                                       --exclude=ocf/patches/crypto-tools.patch \
352 +                                       --exclude=ocf/tools \
353 +                                       ocf; \
354 +               gzip -9 $$RELDIR/ocf-linux.tar; \
355 +               cd /tmp; \
356 +               tar cvf ocf-linux-$$REL.tar ocf-linux-$$REL; \
357 +               gzip -9 ocf-linux-$$REL.tar; \
358 +               cd $$CURDIR/../../user; \
359 +               rm -rf /tmp/crypto-tools-$$REL*; \
360 +               tar cvf /tmp/crypto-tools-$$REL.tar \
361 +                                       --exclude=CVS \
362 +                                       --exclude=.* \
363 +                                       --exclude=*.o \
364 +                                       --exclude=cryptotest \
365 +                                       --exclude=cryptokeytest \
366 +                                       crypto-tools; \
367 +               gzip -9 /tmp/crypto-tools-$$REL.tar
368 +
369 --- /dev/null   2007-07-04 13:54:27.000000000 +1000
370 +++ linux/crypto/ocf/talitos/Makefile   2007-07-25 11:02:33.000000000 +1000
371 @@ -0,0 +1,12 @@
372 +# for SGlinux builds
373 +-include $(ROOTDIR)/modules/.config
374 +
375 +obj-$(CONFIG_OCF_TALITOS) += talitos.o
376 +
377 +obj ?= .
378 +EXTRA_CFLAGS += -I$(obj)/.. -I$(obj)/
379 +
380 +ifdef TOPDIR
381 +-include $(TOPDIR)/Rules.make
382 +endif
383 +
384 --- /dev/null   2007-07-04 13:54:27.000000000 +1000
385 +++ linux/crypto/ocf/ixp4xx/Makefile    2007-10-19 11:24:59.000000000 +1000
386 @@ -0,0 +1,104 @@
387 +# for SGlinux builds
388 +-include $(ROOTDIR)/modules/.config
389 +
390 +#
391 +# You will need to point this at your Intel ixp425 includes,  this portion
392 +# of the Makefile only really works under SGLinux with the appropriate libs
393 +# installed.  They can be downloaded from http://www.snapgear.org/
394 +#
395 +ifeq ($(CONFIG_CPU_IXP46X),y)
396 +IXPLATFORM = ixp46X
397 +else
398 +ifeq ($(CONFIG_CPU_IXP43X),y)
399 +IXPLATFORM = ixp43X
400 +else
401 +IXPLATFORM = ixp42X
402 +endif
403 +endif
404 +
405 +ifdef CONFIG_IXP400_LIB_2_4
406 +IX_XSCALE_SW = $(ROOTDIR)/modules/ixp425/ixp400-2.4/ixp400_xscale_sw
407 +OSAL_DIR     = $(ROOTDIR)/modules/ixp425/ixp400-2.4/ixp_osal
408 +endif
409 +ifdef CONFIG_IXP400_LIB_2_1
410 +IX_XSCALE_SW = $(ROOTDIR)/modules/ixp425/ixp400-2.1/ixp400_xscale_sw
411 +OSAL_DIR     = $(ROOTDIR)/modules/ixp425/ixp400-2.1/ixp_osal
412 +endif
413 +ifdef CONFIG_IXP400_LIB_2_0
414 +IX_XSCALE_SW = $(ROOTDIR)/modules/ixp425/ixp400-2.0/ixp400_xscale_sw
415 +OSAL_DIR     = $(ROOTDIR)/modules/ixp425/ixp400-2.0/ixp_osal
416 +endif
417 +ifdef IX_XSCALE_SW
418 +ifdef CONFIG_IXP400_LIB_2_4
419 +IXP_CFLAGS = \
420 +       -I$(ROOTDIR)/. \
421 +       -I$(IX_XSCALE_SW)/src/include \
422 +       -I$(OSAL_DIR)/common/include/ \
423 +       -I$(OSAL_DIR)/common/include/modules/ \
424 +       -I$(OSAL_DIR)/common/include/modules/ddk/ \
425 +       -I$(OSAL_DIR)/common/include/modules/bufferMgt/ \
426 +       -I$(OSAL_DIR)/common/include/modules/ioMem/ \
427 +       -I$(OSAL_DIR)/common/os/linux/include/ \
428 +       -I$(OSAL_DIR)/common/os/linux/include/core/  \
429 +       -I$(OSAL_DIR)/common/os/linux/include/modules/ \
430 +       -I$(OSAL_DIR)/common/os/linux/include/modules/ddk/ \
431 +       -I$(OSAL_DIR)/common/os/linux/include/modules/bufferMgt/ \
432 +       -I$(OSAL_DIR)/common/os/linux/include/modules/ioMem/ \
433 +       -I$(OSAL_DIR)/platforms/$(IXPLATFORM)/include/ \
434 +       -I$(OSAL_DIR)/platforms/$(IXPLATFORM)/os/linux/include/ \
435 +       -DENABLE_IOMEM -DENABLE_BUFFERMGT -DENABLE_DDK \
436 +       -DUSE_IXP4XX_CRYPTO
437 +else
438 +IXP_CFLAGS = \
439 +       -I$(ROOTDIR)/. \
440 +       -I$(IX_XSCALE_SW)/src/include \
441 +       -I$(OSAL_DIR)/ \
442 +       -I$(OSAL_DIR)/os/linux/include/ \
443 +       -I$(OSAL_DIR)/os/linux/include/modules/ \
444 +       -I$(OSAL_DIR)/os/linux/include/modules/ioMem/ \
445 +       -I$(OSAL_DIR)/os/linux/include/modules/bufferMgt/ \
446 +       -I$(OSAL_DIR)/os/linux/include/core/  \
447 +       -I$(OSAL_DIR)/os/linux/include/platforms/ \
448 +       -I$(OSAL_DIR)/os/linux/include/platforms/ixp400/ \
449 +       -I$(OSAL_DIR)/os/linux/include/platforms/ixp400/ixp425 \
450 +       -I$(OSAL_DIR)/os/linux/include/platforms/ixp400/ixp465 \
451 +       -I$(OSAL_DIR)/os/linux/include/core/ \
452 +       -I$(OSAL_DIR)/include/ \
453 +       -I$(OSAL_DIR)/include/modules/ \
454 +       -I$(OSAL_DIR)/include/modules/bufferMgt/ \
455 +       -I$(OSAL_DIR)/include/modules/ioMem/ \
456 +       -I$(OSAL_DIR)/include/platforms/ \
457 +       -I$(OSAL_DIR)/include/platforms/ixp400/ \
458 +       -DUSE_IXP4XX_CRYPTO
459 +endif
460 +endif
461 +ifdef CONFIG_IXP400_LIB_1_4
462 +IXP_CFLAGS   = \
463 +       -I$(ROOTDIR)/. \
464 +       -I$(ROOTDIR)/modules/ixp425/ixp400-1.4/ixp400_xscale_sw/src/include \
465 +       -I$(ROOTDIR)/modules/ixp425/ixp400-1.4/ixp400_xscale_sw/src/linux \
466 +       -DUSE_IXP4XX_CRYPTO
467 +endif
468 +ifndef IXPDIR
469 +IXPDIR = ixp-version-is-not-supported
470 +endif
471 +
472 +ifeq ($(CONFIG_CPU_IXP46X),y)
473 +IXP_CFLAGS += -D__ixp46X
474 +else
475 +ifeq ($(CONFIG_CPU_IXP43X),y)
476 +IXP_CFLAGS += -D__ixp43X
477 +else
478 +IXP_CFLAGS += -D__ixp42X
479 +endif
480 +endif
481 +
482 +obj-$(CONFIG_OCF_IXP4XX) += ixp4xx.o
483 +
484 +obj ?= .
485 +EXTRA_CFLAGS += $(IXP_CFLAGS) -I$(obj)/.. -I$(obj)/.
486 +
487 +ifdef TOPDIR
488 +-include $(TOPDIR)/Rules.make
489 +endif
490 +
491 --- /dev/null   2007-07-04 13:54:27.000000000 +1000
492 +++ linux/crypto/ocf/ocfnull/Makefile   2007-07-25 11:02:33.000000000 +1000
493 @@ -0,0 +1,12 @@
494 +# for SGlinux builds
495 +-include $(ROOTDIR)/modules/.config
496 +
497 +obj-$(CONFIG_OCF_OCFNULL) += ocfnull.o
498 +
499 +obj ?= .
500 +EXTRA_CFLAGS += -I$(obj)/..
501 +
502 +ifdef TOPDIR
503 +-include $(TOPDIR)/Rules.make
504 +endif
505 +
506 --- /dev/null   2007-07-04 13:54:27.000000000 +1000
507 +++ linux/crypto/ocf/pasemi/Makefile    2007-12-12 11:36:18.000000000 +1000
508 @@ -0,0 +1,12 @@
509 +# for SGlinux builds
510 +-include $(ROOTDIR)/modules/.config
511 +
512 +obj-$(CONFIG_OCF_PASEMI) += pasemi.o
513 +
514 +obj ?= .
515 +EXTRA_CFLAGS += -I$(obj)/.. -I$(obj)/
516 +
517 +ifdef TOPDIR
518 +-include $(TOPDIR)/Rules.make
519 +endif
520 +
521 --- /dev/null   2007-07-04 13:54:27.000000000 +1000
522 +++ linux/crypto/ocf/Config.in  2008-06-23 10:09:55.000000000 +1000
523 @@ -0,0 +1,32 @@
524 +#############################################################################
525 +
526 +mainmenu_option next_comment
527 +comment 'OCF Configuration'
528 +tristate 'OCF (Open Cryptographic Framework)' CONFIG_OCF_OCF
529 +dep_mbool '  enable fips RNG checks (fips check on RNG data before use)' \
530 +                               CONFIG_OCF_FIPS $CONFIG_OCF_OCF
531 +dep_mbool '  enable harvesting entropy for /dev/random' \
532 +                               CONFIG_OCF_RANDOMHARVEST $CONFIG_OCF_OCF
533 +dep_tristate '  cryptodev (user space support)' \
534 +                               CONFIG_OCF_CRYPTODEV $CONFIG_OCF_OCF
535 +dep_tristate '  cryptosoft (software crypto engine)' \
536 +                               CONFIG_OCF_CRYPTOSOFT $CONFIG_OCF_OCF
537 +dep_tristate '  safenet (HW crypto engine)' \
538 +                               CONFIG_OCF_SAFE $CONFIG_OCF_OCF
539 +dep_tristate '  IXP4xx (HW crypto engine)' \
540 +                               CONFIG_OCF_IXP4XX $CONFIG_OCF_OCF
541 +dep_mbool    '  Enable IXP4xx HW to perform SHA1 and MD5 hashing (very slow)' \
542 +                               CONFIG_OCF_IXP4XX_SHA1_MD5 $CONFIG_OCF_IXP4XX
543 +dep_tristate '  hifn (HW crypto engine)' \
544 +                               CONFIG_OCF_HIFN $CONFIG_OCF_OCF
545 +dep_tristate '  talitos (HW crypto engine)' \
546 +                               CONFIG_OCF_TALITOS $CONFIG_OCF_OCF
547 +dep_tristate '  pasemi (HW crypto engine)' \
548 +                               CONFIG_OCF_PASEMI $CONFIG_OCF_OCF
549 +dep_tristate '  ocfnull (does no crypto)' \
550 +                               CONFIG_OCF_OCFNULL $CONFIG_OCF_OCF
551 +dep_tristate '  ocf-bench (HW crypto in-kernel benchmark)' \
552 +                               CONFIG_OCF_BENCH $CONFIG_OCF_OCF
553 +endmenu
554 +
555 +#############################################################################
556 --- /dev/null   2007-07-04 13:54:27.000000000 +1000
557 +++ linux/crypto/ocf/Kconfig    2008-06-23 10:10:33.000000000 +1000
558 @@ -0,0 +1,95 @@
559 +menu "OCF Configuration"
560 +
561 +config OCF_OCF
562 +       tristate "OCF (Open Cryptographic Framework)"
563 +       help
564 +         A Linux port of the OpenBSD/FreeBSD crypto framework.
565 +
566 +config OCF_RANDOMHARVEST
567 +       bool "crypto random --- harvest entropy for /dev/random"
568 +       depends on OCF_OCF
569 +       help
570 +         Includes code to harvest random numbers from devices that support it.
571 +
572 +config OCF_FIPS
573 +       bool "enable fips RNG checks"
574 +       depends on OCF_OCF && OCF_RANDOMHARVEST
575 +       help
576 +         Run all RNG-provided data through a fips check before
577 +         adding it to /dev/random's entropy pool.
578 +
579 +config OCF_CRYPTODEV
580 +       tristate "cryptodev (user space support)"
581 +       depends on OCF_OCF
582 +       help
583 +         The user space API to access crypto hardware.
584 +
585 +config OCF_CRYPTOSOFT
586 +       tristate "cryptosoft (software crypto engine)"
587 +       depends on OCF_OCF
588 +       help
589 +         A software driver for the OCF framework that uses
590 +         the kernel CryptoAPI.
591 +
592 +config OCF_SAFE
593 +       tristate "safenet (HW crypto engine)"
594 +       depends on OCF_OCF
595 +       help
596 +         A driver for a number of the SafeNet SafeXcel crypto accelerators.
597 +         Currently tested and working on the 1141 and 1741.
598 +
599 +config OCF_IXP4XX
600 +       tristate "IXP4xx (HW crypto engine)"
601 +       depends on OCF_OCF
602 +       help
603 +         XScale IXP4xx crypto accelerator driver.  Requires the
604 +         Intel Access library.
605 +
606 +config OCF_IXP4XX_SHA1_MD5
607 +       bool "IXP4xx SHA1 and MD5 Hashing"
608 +       depends on OCF_IXP4XX
609 +       help
610 +         Allows the IXP4xx crypto accelerator to perform SHA1 and MD5 hashing.
611 +         Note: this is MUCH slower than using cryptosoft (software crypto engine).
612 +
613 +config OCF_HIFN
614 +       tristate "hifn (HW crypto engine)"
615 +       depends on OCF_OCF
616 +       help
617 +         OCF driver for various HIFN based crypto accelerators.
618 +         (7951, 7955, 7956, 7751, 7811)
619 +
620 +config OCF_HIFNHIPP
621 +       tristate "Hifn HIPP (HW packet crypto engine)"
622 +       depends on OCF_OCF
623 +       help
624 +         OCF driver for various HIFN (HIPP) based crypto accelerators
625 +         (7855)
626 +
627 +config OCF_TALITOS
628 +       tristate "talitos (HW crypto engine)"
629 +       depends on OCF_OCF
630 +       help
631 +         OCF driver for Freescale's security engine (SEC/talitos).
632 +
633 +config OCF_PASEMI
634 +        tristate "pasemi (HW crypto engine)"
635 +        depends on OCF_OCF && PPC_PASEMI
636 +        help
637 +          OCF driver for the PA Semi PWRficient DMA Engine
638 +
639 +config OCF_OCFNULL
640 +       tristate "ocfnull (fake crypto engine)"
641 +       depends on OCF_OCF
642 +       help
643 +         OCF driver for measuring ipsec overheads (does no crypto)
644 +
645 +config OCF_BENCH
646 +       tristate "ocf-bench (HW crypto in-kernel benchmark)"
647 +       depends on OCF_OCF
648 +       help
649 +         A very simple encryption test for the in-kernel interface
650 +         of OCF.  Also includes code to benchmark the IXP Access library
651 +         for comparison.
652 +
653 +endmenu
654 --- /dev/null   2007-07-04 13:54:27.000000000 +1000
655 +++ linux/crypto/ocf/README     2007-12-15 21:31:03.000000000 +1000
656 @@ -0,0 +1,166 @@
657 +README - ocf-linux-20071215
658 +---------------------------
659 +
660 +This README provides instructions for getting ocf-linux compiled and
661 +operating in a generic linux environment.  For other information you
662 +might like to visit the home page for this project:
663 +
664 +    http://ocf-linux.sourceforge.net/
665 +
666 +Adding OCF to linux
667 +-------------------
668 +
669 +    Not much in this file for now,  just some notes.  I usually build
670 +    the ocf support as modules but it can be built into the kernel as
671 +    well.  To use it:
672 +
673 +    * mknod /dev/crypto c 10 70
674 +
675 +    * to add OCF to your kernel source,  you have two options.  Apply
676 +      the kernel specific patch:
677 +
678 +          cd linux-2.4*; gunzip < ocf-linux-24-XXXXXXXX.patch.gz | patch -p1
679 +          cd linux-2.6*; gunzip < ocf-linux-26-XXXXXXXX.patch.gz | patch -p1
680 +    
681 +      if you do one of the above,  then you can proceed to the next step,
682 +      or you can do the above process by hand,  using the patches against
683 +      linux-2.4.35 and 2.6.23 to include the ocf code under crypto/ocf.
684 +      Here's how to add it:
685 +
686 +      for 2.4.35 (and later)
687 +
688 +          cd linux-2.4.35/crypto
689 +          tar xvzf ocf-linux.tar.gz
690 +          cd ..
691 +          patch -p1 < crypto/ocf/patches/linux-2.4.35-ocf.patch
692 +
693 +      for 2.6.23 (and later)
694 +
695 +          cd linux-2.6.23/crypto
696 +          tar xvzf ocf-linux.tar.gz
697 +          cd ..
698 +          patch -p1 < crypto/ocf/patches/linux-2.6.23-ocf.patch
699 +
700 +      It should be easy to take this patch and apply it to other more
701 +      recent versions of the kernels.  The same patches should also work
702 +      relatively easily on kernels as old as 2.6.11 and 2.4.18.
703 +      
704 +    * under 2.4 if you are on a non-x86 platform,  you may need to:
705 +
706 +        cp linux-2.X.x/include/asm-i386/kmap_types.h linux-2.X.x/include/asm-YYY
707 +
708 +      so that you can build the kernel crypto support needed for the cryptosoft
709 +      driver.
710 +
711 +    * For simplicity you should enable all the crypto support in your kernel
712 +      except for the test driver.  Likewise for the OCF options.  Do not
713 +      enable OCF crypto drivers for HW that you do not have (for example
714 +      ixp4xx will not compile on non-Xscale systems).
715 +
716 +    * make sure that cryptodev.h (from ocf-linux.tar.gz) is installed as
717 +      crypto/cryptodev.h in an include directory that is used for building
718 +      applications for your platform.  For example on a host system that
719 +      might be:
720 +
721 +              /usr/include/crypto/cryptodev.h
722 +
723 +    * patch your openssl-0.9.8g code with the openssl-0.9.8g.patch.
724 +      (NOTE: there is no longer a need to patch ssh). The patch is against:
725 +      openssl-0_9_8e
726 +
727 +      If you need a patch for an older version of openssl,  you should look
728 +      to older OCF releases.  This patch is unlikely to work on older
729 +      openssl versions.
730 +
731 +      openssl-0.9.8g.patch
732 +                - enables --with-cryptodev for non BSD systems
733 +                - adds -cpu option to openssl speed for calculating CPU load
734 +                  under linux
735 +                - fixes null pointer in openssl speed multi thread output.
736 +                - fixes test keys to work with linux crypto's more stringent
737 +                  key checking.
738 +                - adds MD5/SHA acceleration (Ronen Shitrit), only enabled
739 +                  with the --with-cryptodev-digests option
740 +                - fixes bug in engine code caching.
741 +
742 +    * build crypto-tools-XXXXXXXX.tar.gz if you want to try some of the BSD
743 +      tools for testing OCF (i.e., cryptotest).
744 +
745 +How to load the OCF drivers
746 +---------------------------
747 +
748 +    First insert the base modules:
749 +
750 +        insmod ocf
751 +        insmod cryptodev
752 +
753 +    You can then install the software OCF driver with:
754 +
755 +        insmod cryptosoft
756 +
757 +    and one or more of the OCF HW drivers with:
758 +
759 +        insmod safe
760 +        insmod hifn7751
761 +        insmod ixp4xx
762 +        ...
763 +
764 +    All the drivers take a debug option to enable verbose debugging so that
765 +    you can see what is going on.  For debugging you load them as:
766 +
767 +        insmod ocf crypto_debug=1
768 +        insmod cryptodev cryptodev_debug=1
769 +        insmod cryptosoft swcr_debug=1
770 +
771 +    You may load more than one OCF crypto driver but then there is no guarantee
772 +    as to which will be used.
773 +
774 +    You can also enable debug at run time on 2.6 systems with the following:
775 +
776 +        echo 1 > /sys/module/ocf/parameters/crypto_debug
777 +        echo 1 > /sys/module/cryptodev/parameters/cryptodev_debug
778 +        echo 1 > /sys/module/cryptosoft/parameters/swcr_debug
779 +        echo 1 > /sys/module/hifn7751/parameters/hifn_debug
780 +        echo 1 > /sys/module/safe/parameters/safe_debug
781 +        echo 1 > /sys/module/ixp4xx/parameters/ixp_debug
782 +        ...
783 +
784 +Testing the OCF support
785 +-----------------------
786 +
787 +    Run "cryptotest";  it should do a short test for a couple of
788 +    DES packets.  If it does, everything is working.
789 +
790 +    If this works,  then ssh will use the driver when invoked as:
791 +
792 +        ssh -c 3des username@host
793 +
794 +    To see for sure that it is operating, enable debug as described above.
795 +
796 +    To get a better idea of performance run:
797 +
798 +        cryptotest 100 4096
799 +
800 +    There are more options to cryptotest;  see the help.
801 +
802 +    It is also possible to use openssl to test the speed of the crypto
803 +    drivers.
804 +
805 +        openssl speed -evp des -engine cryptodev -elapsed
806 +        openssl speed -evp des3 -engine cryptodev -elapsed
807 +        openssl speed -evp aes128 -engine cryptodev -elapsed
808 +
809 +    and multiple threads (10) with:
810 +
811 +        openssl speed -evp des -engine cryptodev -elapsed -multi 10
812 +        openssl speed -evp des3 -engine cryptodev -elapsed -multi 10
813 +        openssl speed -evp aes128 -engine cryptodev -elapsed -multi 10
814 +
815 +    for public key testing you can try:
816 +
817 +        cryptokeytest
818 +        openssl speed -engine cryptodev rsa -elapsed
819 +        openssl speed -engine cryptodev dsa -elapsed
820 +
821 +David McCullough
822 +david_mccullough@securecomputing.com
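The user space side described above talks to OCF through /dev/crypto. A minimal sketch of one cryptodev request is shown below; it assumes the CIOCGSESSION/CIOCCRYPT/CIOCFSESSION ioctls and struct session_op/crypt_op from the cryptodev.h shipped in ocf-linux.tar.gz (installed as crypto/cryptodev.h), and is illustrative only, with most error handling trimmed:

    /* sketch: one DES-CBC encryption through /dev/crypto */
    #include <fcntl.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <crypto/cryptodev.h>

    int main(void)
    {
            char key[8] = "01234567";       /* 64-bit DES key (sketch value) */
            char buf[8] = "justtest";       /* one 8-byte block, encrypted in place */
            char iv[8]  = { 0 };
            struct session_op sess;
            struct crypt_op op;
            int fd = open("/dev/crypto", O_RDWR);

            if (fd < 0)
                    return 1;

            memset(&sess, 0, sizeof(sess));
            sess.cipher = CRYPTO_DES_CBC;
            sess.keylen = sizeof(key);
            sess.key    = key;
            if (ioctl(fd, CIOCGSESSION, &sess) < 0)
                    return 1;

            memset(&op, 0, sizeof(op));
            op.ses = sess.ses;
            op.op  = COP_ENCRYPT;
            op.len = sizeof(buf);
            op.src = buf;
            op.dst = buf;
            op.iv  = iv;
            if (ioctl(fd, CIOCCRYPT, &op) < 0)
                    return 1;

            ioctl(fd, CIOCFSESSION, &sess.ses);
            return 0;
    }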
823 --- /dev/null   2007-07-04 13:54:27.000000000 +1000
824 +++ linux/crypto/ocf/hifn/hifn7751reg.h 2007-06-20 09:15:58.000000000 +1000
825 @@ -0,0 +1,540 @@
826 +/* $FreeBSD: src/sys/dev/hifn/hifn7751reg.h,v 1.7 2007/03/21 03:42:49 sam Exp $ */
827 +/*     $OpenBSD: hifn7751reg.h,v 1.35 2002/04/08 17:49:42 jason Exp $  */
828 +
829 +/*-
830 + * Invertex AEON / Hifn 7751 driver
831 + * Copyright (c) 1999 Invertex Inc. All rights reserved.
832 + * Copyright (c) 1999 Theo de Raadt
833 + * Copyright (c) 2000-2001 Network Security Technologies, Inc.
834 + *                     http://www.netsec.net
835 + *
836 + * Please send any comments, feedback, bug-fixes, or feature requests to
837 + * software@invertex.com.
838 + *
839 + * Redistribution and use in source and binary forms, with or without
840 + * modification, are permitted provided that the following conditions
841 + * are met:
842 + *
843 + * 1. Redistributions of source code must retain the above copyright
844 + *    notice, this list of conditions and the following disclaimer.
845 + * 2. Redistributions in binary form must reproduce the above copyright
846 + *    notice, this list of conditions and the following disclaimer in the
847 + *    documentation and/or other materials provided with the distribution.
848 + * 3. The name of the author may not be used to endorse or promote products
849 + *    derived from this software without specific prior written permission.
850 + *
851 + *
852 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
853 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
854 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
855 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
856 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
857 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
858 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
859 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
860 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
861 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
862 + *
863 + * Effort sponsored in part by the Defense Advanced Research Projects
864 + * Agency (DARPA) and Air Force Research Laboratory, Air Force
865 + * Materiel Command, USAF, under agreement number F30602-01-2-0537.
866 + *
867 + */
868 +#ifndef __HIFN_H__
869 +#define        __HIFN_H__
870 +
871 +/*
872 + * Some PCI configuration space offset defines.  The names were made
873 + * identical to the names used by the Linux kernel.
874 + */
875 +#define        HIFN_BAR0               PCIR_BAR(0)     /* PUC register map */
876 +#define        HIFN_BAR1               PCIR_BAR(1)     /* DMA register map */
877 +#define        HIFN_TRDY_TIMEOUT       0x40
878 +#define        HIFN_RETRY_TIMEOUT      0x41
879 +
880 +/*
881 + * PCI vendor and device identifiers
882 + * (the names are preserved from their OpenBSD source).
883 + */
884 +#define        PCI_VENDOR_HIFN         0x13a3          /* Hifn */
885 +#define        PCI_PRODUCT_HIFN_7751   0x0005          /* 7751 */
886 +#define        PCI_PRODUCT_HIFN_6500   0x0006          /* 6500 */
887 +#define        PCI_PRODUCT_HIFN_7811   0x0007          /* 7811 */
888 +#define        PCI_PRODUCT_HIFN_7855   0x001f          /* 7855 */
889 +#define        PCI_PRODUCT_HIFN_7951   0x0012          /* 7951 */
890 +#define        PCI_PRODUCT_HIFN_7955   0x0020          /* 7954/7955 */
891 +#define        PCI_PRODUCT_HIFN_7956   0x001d          /* 7956 */
892 +
893 +#define        PCI_VENDOR_INVERTEX     0x14e1          /* Invertex */
894 +#define        PCI_PRODUCT_INVERTEX_AEON 0x0005        /* AEON */
895 +
896 +#define        PCI_VENDOR_NETSEC       0x1660          /* NetSec */
897 +#define        PCI_PRODUCT_NETSEC_7751 0x7751          /* 7751 */
898 +
899 +/*
900 + * The values below should be multiples of 4 -- and be large enough to handle
901 + * any command the driver implements.
902 + *
903 + * MAX_COMMAND = base command + mac command + encrypt command +
904 + *                     mac-key + rc4-key
905 + * MAX_RESULT  = base result + mac result + mac + encrypt result
906 + *                     
907 + *
908 + */
909 +#define        HIFN_MAX_COMMAND        (8 + 8 + 8 + 64 + 260)
910 +#define        HIFN_MAX_RESULT         (8 + 4 + 20 + 4)
911 +
912 +/*
913 + * hifn_desc_t
914 + *
915 + * Holds an individual descriptor for any of the rings.
916 + */
917 +typedef struct hifn_desc {
918 +       volatile u_int32_t l;           /* length and status bits */
919 +       volatile u_int32_t p;
920 +} hifn_desc_t;
921 +
922 +/*
923 + * Masks for the "length" field of struct hifn_desc.
924 + */
925 +#define        HIFN_D_LENGTH           0x0000ffff      /* length bit mask */
926 +#define        HIFN_D_MASKDONEIRQ      0x02000000      /* mask the done interrupt */
927 +#define        HIFN_D_DESTOVER         0x04000000      /* destination overflow */
928 +#define        HIFN_D_OVER             0x08000000      /* overflow */
929 +#define        HIFN_D_LAST             0x20000000      /* last descriptor in chain */
930 +#define        HIFN_D_JUMP             0x40000000      /* jump descriptor */
931 +#define        HIFN_D_VALID            0x80000000      /* valid bit */
932 +
933 +
934 +/*
935 + * Processing Unit Registers (offset from BASEREG0)
936 + */
937 +#define        HIFN_0_PUDATA           0x00    /* Processing Unit Data */
938 +#define        HIFN_0_PUCTRL           0x04    /* Processing Unit Control */
939 +#define        HIFN_0_PUISR            0x08    /* Processing Unit Interrupt Status */
940 +#define        HIFN_0_PUCNFG           0x0c    /* Processing Unit Configuration */
941 +#define        HIFN_0_PUIER            0x10    /* Processing Unit Interrupt Enable */
942 +#define        HIFN_0_PUSTAT           0x14    /* Processing Unit Status/Chip ID */
943 +#define        HIFN_0_FIFOSTAT         0x18    /* FIFO Status */
944 +#define        HIFN_0_FIFOCNFG         0x1c    /* FIFO Configuration */
945 +#define        HIFN_0_PUCTRL2          0x28    /* Processing Unit Control (2nd map) */
946 +#define        HIFN_0_MUTE1            0x80
947 +#define        HIFN_0_MUTE2            0x90
948 +#define        HIFN_0_SPACESIZE        0x100   /* Register space size */
949 +
950 +/* Processing Unit Control Register (HIFN_0_PUCTRL) */
951 +#define        HIFN_PUCTRL_CLRSRCFIFO  0x0010  /* clear source fifo */
952 +#define        HIFN_PUCTRL_STOP        0x0008  /* stop pu */
953 +#define        HIFN_PUCTRL_LOCKRAM     0x0004  /* lock ram */
954 +#define        HIFN_PUCTRL_DMAENA      0x0002  /* enable dma */
955 +#define        HIFN_PUCTRL_RESET       0x0001  /* Reset processing unit */
956 +
957 +/* Processing Unit Interrupt Status Register (HIFN_0_PUISR) */
958 +#define        HIFN_PUISR_CMDINVAL     0x8000  /* Invalid command interrupt */
959 +#define        HIFN_PUISR_DATAERR      0x4000  /* Data error interrupt */
960 +#define        HIFN_PUISR_SRCFIFO      0x2000  /* Source FIFO ready interrupt */
961 +#define        HIFN_PUISR_DSTFIFO      0x1000  /* Destination FIFO ready interrupt */
962 +#define        HIFN_PUISR_DSTOVER      0x0200  /* Destination overrun interrupt */
963 +#define        HIFN_PUISR_SRCCMD       0x0080  /* Source command interrupt */
964 +#define        HIFN_PUISR_SRCCTX       0x0040  /* Source context interrupt */
965 +#define        HIFN_PUISR_SRCDATA      0x0020  /* Source data interrupt */
966 +#define        HIFN_PUISR_DSTDATA      0x0010  /* Destination data interrupt */
967 +#define        HIFN_PUISR_DSTRESULT    0x0004  /* Destination result interrupt */
968 +
969 +/* Processing Unit Configuration Register (HIFN_0_PUCNFG) */
970 +#define        HIFN_PUCNFG_DRAMMASK    0xe000  /* DRAM size mask */
971 +#define        HIFN_PUCNFG_DSZ_256K    0x0000  /* 256k dram */
972 +#define        HIFN_PUCNFG_DSZ_512K    0x2000  /* 512k dram */
973 +#define        HIFN_PUCNFG_DSZ_1M      0x4000  /* 1m dram */
974 +#define        HIFN_PUCNFG_DSZ_2M      0x6000  /* 2m dram */
975 +#define        HIFN_PUCNFG_DSZ_4M      0x8000  /* 4m dram */
976 +#define        HIFN_PUCNFG_DSZ_8M      0xa000  /* 8m dram */
977 +#define        HIFN_PUNCFG_DSZ_16M     0xc000  /* 16m dram */
978 +#define        HIFN_PUCNFG_DSZ_32M     0xe000  /* 32m dram */
979 +#define        HIFN_PUCNFG_DRAMREFRESH 0x1800  /* DRAM refresh rate mask */
980 +#define        HIFN_PUCNFG_DRFR_512    0x0000  /* 512 divisor of ECLK */
981 +#define        HIFN_PUCNFG_DRFR_256    0x0800  /* 256 divisor of ECLK */
982 +#define        HIFN_PUCNFG_DRFR_128    0x1000  /* 128 divisor of ECLK */
983 +#define        HIFN_PUCNFG_TCALLPHASES 0x0200  /* your guess is as good as mine... */
984 +#define        HIFN_PUCNFG_TCDRVTOTEM  0x0100  /* your guess is as good as mine... */
985 +#define        HIFN_PUCNFG_BIGENDIAN   0x0080  /* DMA big endian mode */
986 +#define        HIFN_PUCNFG_BUS32       0x0040  /* Bus width 32bits */
987 +#define        HIFN_PUCNFG_BUS16       0x0000  /* Bus width 16 bits */
988 +#define        HIFN_PUCNFG_CHIPID      0x0020  /* Allow chipid from PUSTAT */
989 +#define        HIFN_PUCNFG_DRAM        0x0010  /* Context RAM is DRAM */
990 +#define        HIFN_PUCNFG_SRAM        0x0000  /* Context RAM is SRAM */
991 +#define        HIFN_PUCNFG_COMPSING    0x0004  /* Enable single compression context */
992 +#define        HIFN_PUCNFG_ENCCNFG     0x0002  /* Encryption configuration */
993 +
994 +/* Processing Unit Interrupt Enable Register (HIFN_0_PUIER) */
995 +#define        HIFN_PUIER_CMDINVAL     0x8000  /* Invalid command interrupt */
996 +#define        HIFN_PUIER_DATAERR      0x4000  /* Data error interrupt */
997 +#define        HIFN_PUIER_SRCFIFO      0x2000  /* Source FIFO ready interrupt */
998 +#define        HIFN_PUIER_DSTFIFO      0x1000  /* Destination FIFO ready interrupt */
999 +#define        HIFN_PUIER_DSTOVER      0x0200  /* Destination overrun interrupt */
1000 +#define        HIFN_PUIER_SRCCMD       0x0080  /* Source command interrupt */
1001 +#define        HIFN_PUIER_SRCCTX       0x0040  /* Source context interrupt */
1002 +#define        HIFN_PUIER_SRCDATA      0x0020  /* Source data interrupt */
1003 +#define        HIFN_PUIER_DSTDATA      0x0010  /* Destination data interrupt */
1004 +#define        HIFN_PUIER_DSTRESULT    0x0004  /* Destination result interrupt */
1005 +
1006 +/* Processing Unit Status Register/Chip ID (HIFN_0_PUSTAT) */
1007 +#define        HIFN_PUSTAT_CMDINVAL    0x8000  /* Invalid command interrupt */
1008 +#define        HIFN_PUSTAT_DATAERR     0x4000  /* Data error interrupt */
1009 +#define        HIFN_PUSTAT_SRCFIFO     0x2000  /* Source FIFO ready interrupt */
1010 +#define        HIFN_PUSTAT_DSTFIFO     0x1000  /* Destination FIFO ready interrupt */
1011 +#define        HIFN_PUSTAT_DSTOVER     0x0200  /* Destination overrun interrupt */
1012 +#define        HIFN_PUSTAT_SRCCMD      0x0080  /* Source command interrupt */
1013 +#define        HIFN_PUSTAT_SRCCTX      0x0040  /* Source context interrupt */
1014 +#define        HIFN_PUSTAT_SRCDATA     0x0020  /* Source data interrupt */
1015 +#define        HIFN_PUSTAT_DSTDATA     0x0010  /* Destination data interrupt */
1016 +#define        HIFN_PUSTAT_DSTRESULT   0x0004  /* Destination result interrupt */
1017 +#define        HIFN_PUSTAT_CHIPREV     0x00ff  /* Chip revision mask */
1018 +#define        HIFN_PUSTAT_CHIPENA     0xff00  /* Chip enabled mask */
1019 +#define        HIFN_PUSTAT_ENA_2       0x1100  /* Level 2 enabled */
1020 +#define        HIFN_PUSTAT_ENA_1       0x1000  /* Level 1 enabled */
1021 +#define        HIFN_PUSTAT_ENA_0       0x3000  /* Level 0 enabled */
1022 +#define        HIFN_PUSTAT_REV_2       0x0020  /* 7751 PT6/2 */
1023 +#define        HIFN_PUSTAT_REV_3       0x0030  /* 7751 PT6/3 */
1024 +
1025 +/* FIFO Status Register (HIFN_0_FIFOSTAT) */
1026 +#define        HIFN_FIFOSTAT_SRC       0x7f00  /* Source FIFO available */
1027 +#define        HIFN_FIFOSTAT_DST       0x007f  /* Destination FIFO available */
1028 +
1029 +/* FIFO Configuration Register (HIFN_0_FIFOCNFG) */
1030 +#define        HIFN_FIFOCNFG_THRESHOLD 0x0400  /* must be written as this value */
1031 +
1032 +/*
1033 + * DMA Interface Registers (offset from BASEREG1)
1034 + */
1035 +#define        HIFN_1_DMA_CRAR         0x0c    /* DMA Command Ring Address */
1036 +#define        HIFN_1_DMA_SRAR         0x1c    /* DMA Source Ring Address */
1037 +#define        HIFN_1_DMA_RRAR         0x2c    /* DMA Result Ring Address */
1038 +#define        HIFN_1_DMA_DRAR         0x3c    /* DMA Destination Ring Address */
1039 +#define        HIFN_1_DMA_CSR          0x40    /* DMA Status and Control */
1040 +#define        HIFN_1_DMA_IER          0x44    /* DMA Interrupt Enable */
1041 +#define        HIFN_1_DMA_CNFG         0x48    /* DMA Configuration */
1042 +#define        HIFN_1_PLL              0x4c    /* 7955/7956: PLL config */
1043 +#define        HIFN_1_7811_RNGENA      0x60    /* 7811: rng enable */
1044 +#define        HIFN_1_7811_RNGCFG      0x64    /* 7811: rng config */
1045 +#define        HIFN_1_7811_RNGDAT      0x68    /* 7811: rng data */
1046 +#define        HIFN_1_7811_RNGSTS      0x6c    /* 7811: rng status */
1047 +#define        HIFN_1_DMA_CNFG2        0x6c    /* 7955/7956: dma config #2 */
1048 +#define        HIFN_1_7811_MIPSRST     0x94    /* 7811: MIPS reset */
1049 +#define        HIFN_1_REVID            0x98    /* Revision ID */
1050 +
1051 +#define        HIFN_1_PUB_RESET        0x204   /* Public/RNG Reset */
1052 +#define        HIFN_1_PUB_BASE         0x300   /* Public Base Address */
1053 +#define        HIFN_1_PUB_OPLEN        0x304   /* 7951-compat Public Operand Length */
1054 +#define        HIFN_1_PUB_OP           0x308   /* 7951-compat Public Operand */
1055 +#define        HIFN_1_PUB_STATUS       0x30c   /* 7951-compat Public Status */
1056 +#define        HIFN_1_PUB_IEN          0x310   /* Public Interrupt enable */
1057 +#define        HIFN_1_RNG_CONFIG       0x314   /* RNG config */
1058 +#define        HIFN_1_RNG_DATA         0x318   /* RNG data */
1059 +#define        HIFN_1_PUB_MODE         0x320   /* PK mode */
1060 +#define        HIFN_1_PUB_FIFO_OPLEN   0x380   /* first element of oplen fifo */
1061 +#define        HIFN_1_PUB_FIFO_OP      0x384   /* first element of op fifo */
1062 +#define        HIFN_1_PUB_MEM          0x400   /* start of Public key memory */
1063 +#define        HIFN_1_PUB_MEMEND       0xbff   /* end of Public key memory */
1064 +
1065 +/* DMA Status and Control Register (HIFN_1_DMA_CSR) */
1066 +#define        HIFN_DMACSR_D_CTRLMASK  0xc0000000      /* Destination Ring Control */
1067 +#define        HIFN_DMACSR_D_CTRL_NOP  0x00000000      /* Dest. Control: no-op */
1068 +#define        HIFN_DMACSR_D_CTRL_DIS  0x40000000      /* Dest. Control: disable */
1069 +#define        HIFN_DMACSR_D_CTRL_ENA  0x80000000      /* Dest. Control: enable */
1070 +#define        HIFN_DMACSR_D_ABORT     0x20000000      /* Destination Ring PCI Abort */
1071 +#define        HIFN_DMACSR_D_DONE      0x10000000      /* Destination Ring Done */
1072 +#define        HIFN_DMACSR_D_LAST      0x08000000      /* Destination Ring Last */
1073 +#define        HIFN_DMACSR_D_WAIT      0x04000000      /* Destination Ring Waiting */
1074 +#define        HIFN_DMACSR_D_OVER      0x02000000      /* Destination Ring Overflow */
1075 +#define        HIFN_DMACSR_R_CTRL      0x00c00000      /* Result Ring Control */
1076 +#define        HIFN_DMACSR_R_CTRL_NOP  0x00000000      /* Result Control: no-op */
1077 +#define        HIFN_DMACSR_R_CTRL_DIS  0x00400000      /* Result Control: disable */
1078 +#define        HIFN_DMACSR_R_CTRL_ENA  0x00800000      /* Result Control: enable */
1079 +#define        HIFN_DMACSR_R_ABORT     0x00200000      /* Result Ring PCI Abort */
1080 +#define        HIFN_DMACSR_R_DONE      0x00100000      /* Result Ring Done */
1081 +#define        HIFN_DMACSR_R_LAST      0x00080000      /* Result Ring Last */
1082 +#define        HIFN_DMACSR_R_WAIT      0x00040000      /* Result Ring Waiting */
1083 +#define        HIFN_DMACSR_R_OVER      0x00020000      /* Result Ring Overflow */
1084 +#define        HIFN_DMACSR_S_CTRL      0x0000c000      /* Source Ring Control */
1085 +#define        HIFN_DMACSR_S_CTRL_NOP  0x00000000      /* Source Control: no-op */
1086 +#define        HIFN_DMACSR_S_CTRL_DIS  0x00004000      /* Source Control: disable */
1087 +#define        HIFN_DMACSR_S_CTRL_ENA  0x00008000      /* Source Control: enable */
1088 +#define        HIFN_DMACSR_S_ABORT     0x00002000      /* Source Ring PCI Abort */
1089 +#define        HIFN_DMACSR_S_DONE      0x00001000      /* Source Ring Done */
1090 +#define        HIFN_DMACSR_S_LAST      0x00000800      /* Source Ring Last */
1091 +#define        HIFN_DMACSR_S_WAIT      0x00000400      /* Source Ring Waiting */
1092 +#define        HIFN_DMACSR_ILLW        0x00000200      /* Illegal write (7811 only) */
1093 +#define        HIFN_DMACSR_ILLR        0x00000100      /* Illegal read (7811 only) */
1094 +#define        HIFN_DMACSR_C_CTRL      0x000000c0      /* Command Ring Control */
1095 +#define        HIFN_DMACSR_C_CTRL_NOP  0x00000000      /* Command Control: no-op */
1096 +#define        HIFN_DMACSR_C_CTRL_DIS  0x00000040      /* Command Control: disable */
1097 +#define        HIFN_DMACSR_C_CTRL_ENA  0x00000080      /* Command Control: enable */
1098 +#define        HIFN_DMACSR_C_ABORT     0x00000020      /* Command Ring PCI Abort */
1099 +#define        HIFN_DMACSR_C_DONE      0x00000010      /* Command Ring Done */
1100 +#define        HIFN_DMACSR_C_LAST      0x00000008      /* Command Ring Last */
1101 +#define        HIFN_DMACSR_C_WAIT      0x00000004      /* Command Ring Waiting */
1102 +#define        HIFN_DMACSR_PUBDONE     0x00000002      /* Public op done (7951 only) */
1103 +#define        HIFN_DMACSR_ENGINE      0x00000001      /* Command Ring Engine IRQ */
1104 +
1105 +/* DMA Interrupt Enable Register (HIFN_1_DMA_IER) */
1106 +#define        HIFN_DMAIER_D_ABORT     0x20000000      /* Destination Ring PCIAbort */
1107 +#define        HIFN_DMAIER_D_DONE      0x10000000      /* Destination Ring Done */
1108 +#define        HIFN_DMAIER_D_LAST      0x08000000      /* Destination Ring Last */
1109 +#define        HIFN_DMAIER_D_WAIT      0x04000000      /* Destination Ring Waiting */
1110 +#define        HIFN_DMAIER_D_OVER      0x02000000      /* Destination Ring Overflow */
1111 +#define        HIFN_DMAIER_R_ABORT     0x00200000      /* Result Ring PCI Abort */
1112 +#define        HIFN_DMAIER_R_DONE      0x00100000      /* Result Ring Done */
1113 +#define        HIFN_DMAIER_R_LAST      0x00080000      /* Result Ring Last */
1114 +#define        HIFN_DMAIER_R_WAIT      0x00040000      /* Result Ring Waiting */
1115 +#define        HIFN_DMAIER_R_OVER      0x00020000      /* Result Ring Overflow */
1116 +#define        HIFN_DMAIER_S_ABORT     0x00002000      /* Source Ring PCI Abort */
1117 +#define        HIFN_DMAIER_S_DONE      0x00001000      /* Source Ring Done */
1118 +#define        HIFN_DMAIER_S_LAST      0x00000800      /* Source Ring Last */
1119 +#define        HIFN_DMAIER_S_WAIT      0x00000400      /* Source Ring Waiting */
1120 +#define        HIFN_DMAIER_ILLW        0x00000200      /* Illegal write (7811 only) */
1121 +#define        HIFN_DMAIER_ILLR        0x00000100      /* Illegal read (7811 only) */
1122 +#define        HIFN_DMAIER_C_ABORT     0x00000020      /* Command Ring PCI Abort */
1123 +#define        HIFN_DMAIER_C_DONE      0x00000010      /* Command Ring Done */
1124 +#define        HIFN_DMAIER_C_LAST      0x00000008      /* Command Ring Last */
1125 +#define        HIFN_DMAIER_C_WAIT      0x00000004      /* Command Ring Waiting */
1126 +#define        HIFN_DMAIER_PUBDONE     0x00000002      /* public op done (7951 only) */
1127 +#define        HIFN_DMAIER_ENGINE      0x00000001      /* Engine IRQ */
1128 +
1129 +/* DMA Configuration Register (HIFN_1_DMA_CNFG) */
1130 +#define        HIFN_DMACNFG_BIGENDIAN  0x10000000      /* big endian mode */
1131 +#define        HIFN_DMACNFG_POLLFREQ   0x00ff0000      /* Poll frequency mask */
1132 +#define        HIFN_DMACNFG_UNLOCK     0x00000800
1133 +#define        HIFN_DMACNFG_POLLINVAL  0x00000700      /* Invalid Poll Scalar */
1134 +#define        HIFN_DMACNFG_LAST       0x00000010      /* Host control LAST bit */
1135 +#define        HIFN_DMACNFG_MODE       0x00000004      /* DMA mode */
1136 +#define        HIFN_DMACNFG_DMARESET   0x00000002      /* DMA Reset # */
1137 +#define        HIFN_DMACNFG_MSTRESET   0x00000001      /* Master Reset # */
1138 +
1139 +/* DMA Configuration Register (HIFN_1_DMA_CNFG2) */
1140 +#define        HIFN_DMACNFG2_PKSWAP32  (1 << 19)       /* swap the OPLEN/OP reg */
1141 +#define        HIFN_DMACNFG2_PKSWAP8   (1 << 18)       /* swap the bits of OPLEN/OP */
1142 +#define        HIFN_DMACNFG2_BAR0_SWAP32 (1<<17)       /* swap the bytes of BAR0 */
1143 +#define        HIFN_DMACNFG2_BAR1_SWAP8 (1<<16)        /* swap the bits  of BAR0 */
1144 +#define        HIFN_DMACNFG2_INIT_WRITE_BURST_SHIFT 12
1145 +#define        HIFN_DMACNFG2_INIT_READ_BURST_SHIFT 8
1146 +#define        HIFN_DMACNFG2_TGT_WRITE_BURST_SHIFT 4
1147 +#define        HIFN_DMACNFG2_TGT_READ_BURST_SHIFT  0
1148 +
1149 +/* 7811 RNG Enable Register (HIFN_1_7811_RNGENA) */
1150 +#define        HIFN_7811_RNGENA_ENA    0x00000001      /* enable RNG */
1151 +
1152 +/* 7811 RNG Config Register (HIFN_1_7811_RNGCFG) */
1153 +#define        HIFN_7811_RNGCFG_PRE1   0x00000f00      /* first prescalar */
1154 +#define        HIFN_7811_RNGCFG_OPRE   0x00000080      /* output prescalar */
1155 +#define        HIFN_7811_RNGCFG_DEFL   0x00000f80      /* 2 words/ 1/100 sec */
1156 +
1157 +/* 7811 RNG Status Register (HIFN_1_7811_RNGSTS) */
1158 +#define        HIFN_7811_RNGSTS_RDY    0x00004000      /* two numbers in FIFO */
1159 +#define        HIFN_7811_RNGSTS_UFL    0x00001000      /* rng underflow */
1160 +
1161 +/* 7811 MIPS Reset Register (HIFN_1_7811_MIPSRST) */
1162 +#define        HIFN_MIPSRST_BAR2SIZE   0xffff0000      /* sdram size */
1163 +#define        HIFN_MIPSRST_GPRAMINIT  0x00008000      /* gpram can be accessed */
1164 +#define        HIFN_MIPSRST_CRAMINIT   0x00004000      /* ctxram can be accessed */
1165 +#define        HIFN_MIPSRST_LED2       0x00000400      /* external LED2 */
1166 +#define        HIFN_MIPSRST_LED1       0x00000200      /* external LED1 */
1167 +#define        HIFN_MIPSRST_LED0       0x00000100      /* external LED0 */
1168 +#define        HIFN_MIPSRST_MIPSDIS    0x00000004      /* disable MIPS */
1169 +#define        HIFN_MIPSRST_MIPSRST    0x00000002      /* warm reset MIPS */
1170 +#define        HIFN_MIPSRST_MIPSCOLD   0x00000001      /* cold reset MIPS */
1171 +
1172 +/* Public key reset register (HIFN_1_PUB_RESET) */
1173 +#define        HIFN_PUBRST_RESET       0x00000001      /* reset public/rng unit */
1174 +
1175 +/* Public operation register (HIFN_1_PUB_OP) */
1176 +#define        HIFN_PUBOP_AOFFSET      0x0000003e      /* A offset */
1177 +#define        HIFN_PUBOP_BOFFSET      0x00000fc0      /* B offset */
1178 +#define        HIFN_PUBOP_MOFFSET      0x0003f000      /* M offset */
1179 +#define        HIFN_PUBOP_OP_MASK      0x003c0000      /* Opcode: */
1180 +#define        HIFN_PUBOP_OP_NOP       0x00000000      /*  NOP */
1181 +#define        HIFN_PUBOP_OP_ADD       0x00040000      /*  ADD */
1182 +#define        HIFN_PUBOP_OP_ADDC      0x00080000      /*  ADD w/carry */
1183 +#define        HIFN_PUBOP_OP_SUB       0x000c0000      /*  SUB */
1184 +#define        HIFN_PUBOP_OP_SUBC      0x00100000      /*  SUB w/carry */
1185 +#define        HIFN_PUBOP_OP_MODADD    0x00140000      /*  Modular ADD */
1186 +#define        HIFN_PUBOP_OP_MODSUB    0x00180000      /*  Modular SUB */
1187 +#define        HIFN_PUBOP_OP_INCA      0x001c0000      /*  INC A */
1188 +#define        HIFN_PUBOP_OP_DECA      0x00200000      /*  DEC A */
1189 +#define        HIFN_PUBOP_OP_MULT      0x00240000      /*  MULT */
1190 +#define        HIFN_PUBOP_OP_MODMULT   0x00280000      /*  Modular MULT */
1191 +#define        HIFN_PUBOP_OP_MODRED    0x002c0000      /*  Modular Red */
1192 +#define        HIFN_PUBOP_OP_MODEXP    0x00300000      /*  Modular Exp */
1193 +
1194 +/* Public operand length register (HIFN_1_PUB_OPLEN) */
1195 +#define        HIFN_PUBOPLEN_MODLEN    0x0000007f
1196 +#define        HIFN_PUBOPLEN_EXPLEN    0x0003ff80
1197 +#define        HIFN_PUBOPLEN_REDLEN    0x003c0000
1198 +
1199 +/* Public status register (HIFN_1_PUB_STATUS) */
1200 +#define        HIFN_PUBSTS_DONE        0x00000001      /* operation done */
1201 +#define        HIFN_PUBSTS_CARRY       0x00000002      /* carry */
1202 +#define        HIFN_PUBSTS_FIFO_EMPTY  0x00000100      /* fifo empty */
1203 +#define        HIFN_PUBSTS_FIFO_FULL   0x00000200      /* fifo full */
1204 +#define        HIFN_PUBSTS_FIFO_OVFL   0x00000400      /* fifo overflow */
1205 +#define        HIFN_PUBSTS_FIFO_WRITE  0x000f0000      /* fifo write */
1206 +#define        HIFN_PUBSTS_FIFO_READ   0x0f000000      /* fifo read */
1207 +
1208 +/* Public interrupt enable register (HIFN_1_PUB_IEN) */
1209 +#define        HIFN_PUBIEN_DONE        0x00000001      /* operation done interrupt */
1210 +
1211 +/* Random number generator config register (HIFN_1_RNG_CONFIG) */
1212 +#define        HIFN_RNGCFG_ENA         0x00000001      /* enable rng */
1213 +
1214 +/*
1215 + * Register offsets in register set 1
1216 + */
1217 +
1218 +#define        HIFN_UNLOCK_SECRET1     0xf4
1219 +#define        HIFN_UNLOCK_SECRET2     0xfc
1220 +
1221 +/*
1222 + * PLL config register
1223 + *
1224 + * This register is present only on 7954/7955/7956 parts. It must be
1225 + * programmed according to the bus interface method used by the h/w.
1226 + * Note that the parts require a stable clock.  Since the PCI clock
1227 + * may vary, the reference clock must usually be used.  To avoid
1228 + * overclocking the core logic, setup must be done carefully, refer
1229 + * to the driver for details.  The exact multiplier required varies
1230 + * by part and system configuration; refer to the Hifn documentation.
1231 + */
1232 +#define        HIFN_PLL_REF_SEL        0x00000001      /* REF/HBI clk selection */
1233 +#define        HIFN_PLL_BP             0x00000002      /* bypass (used during setup) */
1234 +/* bit 2 reserved */
1235 +#define        HIFN_PLL_PK_CLK_SEL     0x00000008      /* public key clk select */
1236 +#define        HIFN_PLL_PE_CLK_SEL     0x00000010      /* packet engine clk select */
1237 +/* bits 5-9 reserved */
1238 +#define        HIFN_PLL_MBSET          0x00000400      /* must be set to 1 */
1239 +#define        HIFN_PLL_ND             0x00003800      /* Fpll_ref multiplier select */
1240 +#define        HIFN_PLL_ND_SHIFT       11
1241 +#define        HIFN_PLL_ND_2           0x00000000      /* 2x */
1242 +#define        HIFN_PLL_ND_4           0x00000800      /* 4x */
1243 +#define        HIFN_PLL_ND_6           0x00001000      /* 6x */
1244 +#define        HIFN_PLL_ND_8           0x00001800      /* 8x */
1245 +#define        HIFN_PLL_ND_10          0x00002000      /* 10x */
1246 +#define        HIFN_PLL_ND_12          0x00002800      /* 12x */
1247 +/* bits 14-15 reserved */
1248 +#define        HIFN_PLL_IS             0x00010000      /* charge pump current select */
1249 +/* bits 17-31 reserved */
1250 +
1251 +/*
1252 + * Board configuration specifies only these bits.
1253 + */
1254 +#define        HIFN_PLL_CONFIG         (HIFN_PLL_IS|HIFN_PLL_ND|HIFN_PLL_REF_SEL)
1255 +
1256 +/*
1257 + * Public Key Engine Mode Register
1258 + */
1259 +#define        HIFN_PKMODE_HOSTINVERT  (1 << 0)        /* HOST INVERT */
1260 +#define        HIFN_PKMODE_ENHANCED    (1 << 1)        /* Enable enhanced mode */
1261 +
1262 +
1263 +/*********************************************************************
1264 + * Structs for board commands 
1265 + *
1266 + *********************************************************************/
1267 +
1268 +/*
1269 + * Structure to help build up the command data structure.
1270 + */
1271 +typedef struct hifn_base_command {
1272 +       volatile u_int16_t masks;
1273 +       volatile u_int16_t session_num;
1274 +       volatile u_int16_t total_source_count;
1275 +       volatile u_int16_t total_dest_count;
1276 +} hifn_base_command_t;
1277 +
1278 +#define        HIFN_BASE_CMD_MAC               0x0400
1279 +#define        HIFN_BASE_CMD_CRYPT             0x0800
1280 +#define        HIFN_BASE_CMD_DECODE            0x2000
1281 +#define        HIFN_BASE_CMD_SRCLEN_M          0xc000
1282 +#define        HIFN_BASE_CMD_SRCLEN_S          14
1283 +#define        HIFN_BASE_CMD_DSTLEN_M          0x3000
1284 +#define        HIFN_BASE_CMD_DSTLEN_S          12
1285 +#define        HIFN_BASE_CMD_LENMASK_HI        0x30000
1286 +#define        HIFN_BASE_CMD_LENMASK_LO        0x0ffff
1287 +
1288 +/*
1289 + * Structure to help build up the command data structure.
1290 + */
1291 +typedef struct hifn_crypt_command {
1292 +       volatile u_int16_t masks;
1293 +       volatile u_int16_t header_skip;
1294 +       volatile u_int16_t source_count;
1295 +       volatile u_int16_t reserved;
1296 +} hifn_crypt_command_t;
1297 +
1298 +#define        HIFN_CRYPT_CMD_ALG_MASK         0x0003          /* algorithm: */
1299 +#define        HIFN_CRYPT_CMD_ALG_DES          0x0000          /*   DES */
1300 +#define        HIFN_CRYPT_CMD_ALG_3DES         0x0001          /*   3DES */
1301 +#define        HIFN_CRYPT_CMD_ALG_RC4          0x0002          /*   RC4 */
1302 +#define        HIFN_CRYPT_CMD_ALG_AES          0x0003          /*   AES */
1303 +#define        HIFN_CRYPT_CMD_MODE_MASK        0x0018          /* Encrypt mode: */
1304 +#define        HIFN_CRYPT_CMD_MODE_ECB         0x0000          /*   ECB */
1305 +#define        HIFN_CRYPT_CMD_MODE_CBC         0x0008          /*   CBC */
1306 +#define        HIFN_CRYPT_CMD_MODE_CFB         0x0010          /*   CFB */
1307 +#define        HIFN_CRYPT_CMD_MODE_OFB         0x0018          /*   OFB */
1308 +#define        HIFN_CRYPT_CMD_CLR_CTX          0x0040          /* clear context */
1309 +#define        HIFN_CRYPT_CMD_NEW_KEY          0x0800          /* expect new key */
1310 +#define        HIFN_CRYPT_CMD_NEW_IV           0x1000          /* expect new iv */
1311 +
1312 +#define        HIFN_CRYPT_CMD_SRCLEN_M         0xc000
1313 +#define        HIFN_CRYPT_CMD_SRCLEN_S         14
1314 +
1315 +#define        HIFN_CRYPT_CMD_KSZ_MASK         0x0600          /* AES key size: */
1316 +#define        HIFN_CRYPT_CMD_KSZ_128          0x0000          /*   128 bit */
1317 +#define        HIFN_CRYPT_CMD_KSZ_192          0x0200          /*   192 bit */
1318 +#define        HIFN_CRYPT_CMD_KSZ_256          0x0400          /*   256 bit */
1319 +
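/*
 * Illustrative sketch (not from the driver sources): the crypt-command bits
 * above are simply OR'd together.  The variable name here is only an example,
 * though cry_masks does appear as a field of struct hifn_command further below.
 */
u_int16_t cry_masks;

/* AES-256 in CBC mode, with a new key and IV carried in the command */
cry_masks = HIFN_CRYPT_CMD_ALG_AES | HIFN_CRYPT_CMD_MODE_CBC |
    HIFN_CRYPT_CMD_KSZ_256 | HIFN_CRYPT_CMD_NEW_KEY | HIFN_CRYPT_CMD_NEW_IV;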
1320 +/*
1321 + * Structure to help build up the command data structure.
1322 + */
1323 +typedef struct hifn_mac_command {
1324 +       volatile u_int16_t masks;
1325 +       volatile u_int16_t header_skip;
1326 +       volatile u_int16_t source_count;
1327 +       volatile u_int16_t reserved;
1328 +} hifn_mac_command_t;
1329 +
1330 +#define        HIFN_MAC_CMD_ALG_MASK           0x0001
1331 +#define        HIFN_MAC_CMD_ALG_SHA1           0x0000
1332 +#define        HIFN_MAC_CMD_ALG_MD5            0x0001
1333 +#define        HIFN_MAC_CMD_MODE_MASK          0x000c
1334 +#define        HIFN_MAC_CMD_MODE_HMAC          0x0000
1335 +#define        HIFN_MAC_CMD_MODE_SSL_MAC       0x0004
1336 +#define        HIFN_MAC_CMD_MODE_HASH          0x0008
1337 +#define        HIFN_MAC_CMD_MODE_FULL          0x0004
1338 +#define        HIFN_MAC_CMD_TRUNC              0x0010
1339 +#define        HIFN_MAC_CMD_RESULT             0x0020
1340 +#define        HIFN_MAC_CMD_APPEND             0x0040
1341 +#define        HIFN_MAC_CMD_SRCLEN_M           0xc000
1342 +#define        HIFN_MAC_CMD_SRCLEN_S           14
1343 +
1344 +/*
1345 + * MAC POS IPsec initiates authentication after encryption on encodes
1346 + * and before decryption on decodes.
1347 + */
1348 +#define        HIFN_MAC_CMD_POS_IPSEC          0x0200
1349 +#define        HIFN_MAC_CMD_NEW_KEY            0x0800
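/*
 * Similarly, an illustrative sketch (not from the driver sources) of a
 * MAC-command mask for truncated HMAC-SHA1 with a fresh key; mac_masks is
 * the matching field of struct hifn_command below.
 */
u_int16_t mac_masks;

/* HMAC-SHA1, truncated (12-byte) hash, request the MAC result, new MAC key */
mac_masks = HIFN_MAC_CMD_ALG_SHA1 | HIFN_MAC_CMD_MODE_HMAC |
    HIFN_MAC_CMD_TRUNC | HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_NEW_KEY;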
1350 +
1351 +/*
1352 + * The poll frequency and poll scalar defines are unshifted values used
1353 + * to set fields in the DMA Configuration Register.
1354 + */
1355 +#ifndef HIFN_POLL_FREQUENCY
1356 +#define        HIFN_POLL_FREQUENCY     0x1
1357 +#endif
1358 +
1359 +#ifndef HIFN_POLL_SCALAR
1360 +#define        HIFN_POLL_SCALAR        0x0
1361 +#endif
1362 +
1363 +#define        HIFN_MAX_SEGLEN         0xffff          /* maximum dma segment len */
1364 +#define        HIFN_MAX_DMALEN         0x3ffff         /* maximum dma length */
1365 +#endif /* __HIFN_H__ */
1366 --- /dev/null   2007-07-04 13:54:27.000000000 +1000
1367 +++ linux/crypto/ocf/hifn/hifn7751var.h 2007-06-20 09:22:39.000000000 +1000
1368 @@ -0,0 +1,369 @@
1369 +/* $FreeBSD: src/sys/dev/hifn/hifn7751var.h,v 1.9 2007/03/21 03:42:49 sam Exp $ */
1370 +/*     $OpenBSD: hifn7751var.h,v 1.42 2002/04/08 17:49:42 jason Exp $  */
1371 +
1372 +/*-
1373 + * Invertex AEON / Hifn 7751 driver
1374 + * Copyright (c) 1999 Invertex Inc. All rights reserved.
1375 + * Copyright (c) 1999 Theo de Raadt
1376 + * Copyright (c) 2000-2001 Network Security Technologies, Inc.
1377 + *                     http://www.netsec.net
1378 + *
1379 + * Please send any comments, feedback, bug-fixes, or feature requests to
1380 + * software@invertex.com.
1381 + *
1382 + * Redistribution and use in source and binary forms, with or without
1383 + * modification, are permitted provided that the following conditions
1384 + * are met:
1385 + *
1386 + * 1. Redistributions of source code must retain the above copyright
1387 + *    notice, this list of conditions and the following disclaimer.
1388 + * 2. Redistributions in binary form must reproduce the above copyright
1389 + *    notice, this list of conditions and the following disclaimer in the
1390 + *    documentation and/or other materials provided with the distribution.
1391 + * 3. The name of the author may not be used to endorse or promote products
1392 + *    derived from this software without specific prior written permission.
1393 + *
1394 + *
1395 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
1396 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
1397 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
1398 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
1399 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
1400 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
1401 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
1402 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1403 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
1404 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1405 + *
1406 + * Effort sponsored in part by the Defense Advanced Research Projects
1407 + * Agency (DARPA) and Air Force Research Laboratory, Air Force
1408 + * Materiel Command, USAF, under agreement number F30602-01-2-0537.
1409 + *
1410 + */
1411 +
1412 +#ifndef __HIFN7751VAR_H__
1413 +#define __HIFN7751VAR_H__
1414 +
1415 +#ifdef __KERNEL__
1416 +
1417 +/*
1418 + * Some configurable values for the driver.  By default command+result
1419 + * descriptor rings are the same size.  The src+dst descriptor rings
1420 + * are sized at 3.5x the number of potential commands.  Slower parts
1421 + * (e.g. 7951) tend to run out of src descriptors; faster parts (7811)
1422 + * run out of src+cmd/result descriptors.  It's not clear that increasing the size
1423 + * of the descriptor rings helps performance significantly as other
1424 + * factors tend to come into play (e.g. copying misaligned packets).
1425 + */
1426 +#define        HIFN_D_CMD_RSIZE        24      /* command descriptors */
1427 +#define        HIFN_D_SRC_RSIZE        ((HIFN_D_CMD_RSIZE * 7) / 2)    /* source descriptors */
1428 +#define        HIFN_D_RES_RSIZE        HIFN_D_CMD_RSIZE        /* result descriptors */
1429 +#define        HIFN_D_DST_RSIZE        HIFN_D_SRC_RSIZE        /* destination descriptors */
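/*
 * Worked out for reference (illustrative arithmetic only, no new defines):
 *   HIFN_D_CMD_RSIZE = 24                 command descriptors
 *   HIFN_D_SRC_RSIZE = (24 * 7) / 2 = 84  source descriptors
 *   HIFN_D_RES_RSIZE = 24                 result descriptors
 *   HIFN_D_DST_RSIZE = 84                 destination descriptors
 * struct hifn_dma below then adds one extra (jump) descriptor per ring.
 */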
1430 +
1431 +/*
1432 + *  Length values for cryptography
1433 + */
1434 +#define HIFN_DES_KEY_LENGTH            8
1435 +#define HIFN_3DES_KEY_LENGTH           24
1436 +#define HIFN_MAX_CRYPT_KEY_LENGTH      HIFN_3DES_KEY_LENGTH
1437 +#define HIFN_IV_LENGTH                 8
1438 +#define        HIFN_AES_IV_LENGTH              16
1439 +#define HIFN_MAX_IV_LENGTH             HIFN_AES_IV_LENGTH
1440 +
1441 +/*
1442 + *  Length values for authentication
1443 + */
1444 +#define HIFN_MAC_KEY_LENGTH            64
1445 +#define HIFN_MD5_LENGTH                        16
1446 +#define HIFN_SHA1_LENGTH               20
1447 +#define HIFN_MAC_TRUNC_LENGTH          12
1448 +
1449 +#define MAX_SCATTER 64
1450 +
1451 +/*
1452 + * Data structure to hold all 4 rings and any other ring related data.
1453 + */
1454 +struct hifn_dma {
1455 +       /*
1456 +        *  Descriptor rings.  We add +1 to the size to accommodate the
1457 +        *  jump descriptor.
1458 +        */
1459 +       struct hifn_desc        cmdr[HIFN_D_CMD_RSIZE+1];
1460 +       struct hifn_desc        srcr[HIFN_D_SRC_RSIZE+1];
1461 +       struct hifn_desc        dstr[HIFN_D_DST_RSIZE+1];
1462 +       struct hifn_desc        resr[HIFN_D_RES_RSIZE+1];
1463 +
1464 +       struct hifn_command     *hifn_commands[HIFN_D_RES_RSIZE];
1465 +
1466 +       u_char                  command_bufs[HIFN_D_CMD_RSIZE][HIFN_MAX_COMMAND];
1467 +       u_char                  result_bufs[HIFN_D_CMD_RSIZE][HIFN_MAX_RESULT];
1468 +       u_int32_t               slop[HIFN_D_CMD_RSIZE];
1469 +
1470 +       u_int64_t               test_src, test_dst;
1471 +
1472 +       /*
1473 +        *  Our current positions for insertion and removal from the descriptor
1474 +        *  rings. 
1475 +        */
1476 +       int                     cmdi, srci, dsti, resi;
1477 +       volatile int            cmdu, srcu, dstu, resu;
1478 +       int                     cmdk, srck, dstk, resk;
1479 +};
1480 +
1481 +struct hifn_session {
1482 +       int hs_used;
1483 +       int hs_mlen;
1484 +       u_int8_t hs_iv[HIFN_MAX_IV_LENGTH];
1485 +};
1486 +
1487 +#define        HIFN_RING_SYNC(sc, r, i, f)                                     \
1488 +       /* DAVIDM bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_dmamap, (f)) */
1489 +
1490 +#define        HIFN_CMDR_SYNC(sc, i, f)        HIFN_RING_SYNC((sc), cmdr, (i), (f))
1491 +#define        HIFN_RESR_SYNC(sc, i, f)        HIFN_RING_SYNC((sc), resr, (i), (f))
1492 +#define        HIFN_SRCR_SYNC(sc, i, f)        HIFN_RING_SYNC((sc), srcr, (i), (f))
1493 +#define        HIFN_DSTR_SYNC(sc, i, f)        HIFN_RING_SYNC((sc), dstr, (i), (f))
1494 +
1495 +#define        HIFN_CMD_SYNC(sc, i, f)                                         \
1496 +       /* DAVIDM bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_dmamap, (f)) */
1497 +
1498 +#define        HIFN_RES_SYNC(sc, i, f)                                         \
1499 +       /* DAVIDM bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_dmamap, (f)) */
1500 +
1501 +typedef int bus_size_t;
1502 +
1503 +/*
1504 + * Holds data specific to a single HIFN board.
1505 + */
1506 +struct hifn_softc {
1507 +       softc_device_decl                sc_dev;
1508 +
1509 +       struct pci_dev          *sc_pcidev;     /* PCI device pointer */
1510 +       spinlock_t              sc_mtx;         /* per-instance lock */
1511 +
1512 +       int                     sc_num;         /* for multiple devs */
1513 +
1514 +       ocf_iomem_t             sc_bar0;
1515 +       bus_size_t              sc_bar0_lastreg;/* bar0 last reg written */
1516 +       ocf_iomem_t             sc_bar1;
1517 +       bus_size_t              sc_bar1_lastreg;/* bar1 last reg written */
1518 +
1519 +       int                     sc_irq;
1520 +
1521 +       u_int32_t               sc_dmaier;
1522 +       u_int32_t               sc_drammodel;   /* 1=dram, 0=sram */
1523 +       u_int32_t               sc_pllconfig;   /* 7954/7955/7956 PLL config */
1524 +
1525 +       struct hifn_dma         *sc_dma;
1526 +       dma_addr_t              sc_dma_physaddr;/* physical address of sc_dma */
1527 +
1528 +       int                     sc_dmansegs;
1529 +       int32_t                 sc_cid;
1530 +       int                     sc_maxses;
1531 +       int                     sc_nsessions;
1532 +       struct hifn_session     *sc_sessions;
1533 +       int                     sc_ramsize;
1534 +       int                     sc_flags;
1535 +#define        HIFN_HAS_RNG            0x1     /* includes random number generator */
1536 +#define        HIFN_HAS_PUBLIC         0x2     /* includes public key support */
1537 +#define        HIFN_HAS_AES            0x4     /* includes AES support */
1538 +#define        HIFN_IS_7811            0x8     /* Hifn 7811 part */
1539 +#define        HIFN_IS_7956            0x10    /* Hifn 7956/7955 don't have SDRAM */
1540 +
1541 +       struct timer_list       sc_tickto;      /* for managing DMA */
1542 +
1543 +       int                     sc_rngfirst;
1544 +       int                     sc_rnghz;       /* RNG polling frequency */
1545 +
1546 +       int                     sc_c_busy;      /* command ring busy */
1547 +       int                     sc_s_busy;      /* source data ring busy */
1548 +       int                     sc_d_busy;      /* destination data ring busy */
1549 +       int                     sc_r_busy;      /* result ring busy */
1550 +       int                     sc_active;      /* for initial countdown */
1551 +       int                     sc_needwakeup;  /* ops q'd waiting on resources */
1552 +       int                     sc_curbatch;    /* # ops submitted w/o int */
1553 +       int                     sc_suspended;
1554 +#ifdef HIFN_VULCANDEV
1555 +       struct cdev            *sc_pkdev;
1556 +#endif
1557 +};
1558 +
1559 +#define        HIFN_LOCK(_sc)          spin_lock_irqsave(&(_sc)->sc_mtx, l_flags)
1560 +#define        HIFN_UNLOCK(_sc)        spin_unlock_irqrestore(&(_sc)->sc_mtx, l_flags)
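/*
 * Hypothetical sketch (the helper name is made up): both macros expand to the
 * irqsave/irqrestore spinlock calls and reference a local variable named
 * l_flags, so any caller has to declare it, as hifn_remove() does below.
 */
static void hifn_do_something_locked(struct hifn_softc *sc)
{
	unsigned long l_flags;		/* consumed by HIFN_LOCK/HIFN_UNLOCK */

	HIFN_LOCK(sc);
	/* ... touch rings or registers guarded by sc_mtx ... */
	HIFN_UNLOCK(sc);
}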
1561 +
1562 +/*
1563 + *  hifn_command_t
1564 + *
1565 + *  This is the control structure used to pass commands to hifn_encrypt().
1566 + *
1567 + *  flags
1568 + *  -----
1569 + *  Flags is the bitwise "or" values for command configuration.  A single
1570 + *  encrypt direction needs to be set:
1571 + *
1572 + *     HIFN_ENCODE or HIFN_DECODE
1573 + *
1574 + *  To use cryptography, a single crypto algorithm must be included:
1575 + *
1576 + *     HIFN_CRYPT_3DES or HIFN_CRYPT_DES
1577 + *
1578 + *  To use authentication, a single MAC algorithm must be included:
1579 + *
1580 + *     HIFN_MAC_MD5 or HIFN_MAC_SHA1
1581 + *
1582 + *  By default MD5 uses a 16 byte hash and SHA-1 uses a 20 byte hash.
1583 + *  If the value below is set, hash values are truncated or assumed
1584 + *  truncated to 12 bytes:
1585 + *
1586 + *     HIFN_MAC_TRUNC
1587 + *
1588 + *  Keys for encryption and authentication can be sent as part of a command,
1589 + *  or the last key value used with a particular session can be retrieved
1590 + *  and used again if either of these flags is not specified.
1591 + *
1592 + *     HIFN_CRYPT_NEW_KEY, HIFN_MAC_NEW_KEY
1593 + *
1594 + *  session_num
1595 + *  -----------
1596 + *  A number between 0 and 2048 (for DRAM models) or a number between 
1597 + *  0 and 768 (for SRAM models).  Those who don't want to use session
1598 + *  numbers should leave this value at zero and send a new crypt key and/or
1599 + *  new MAC key on every command.  If you use session numbers and
1600 + *  don't send a key with a command, the last key sent for that same
1601 + *  session number will be used.
1602 + *
1603 + *  Warning:  Using session numbers and multiboard at the same time
1604 + *            is currently broken.
1605 + *
1606 + *  mbuf
1607 + *  ----
1608 + *  Either fill in the mbuf pointer and npa=0 or
1609 + *      fill packp[] and packl[] and set npa to > 0
1610 + * 
1611 + *  mac_header_skip
1612 + *  ---------------
1613 + *  The number of bytes of the source_buf that are skipped over before
1614 + *  authentication begins.  This must be a number between 0 and 2^16-1
1615 + *  and can be used by IPsec implementers to skip over IP headers.
1616 + *  *** Value ignored if authentication not used ***
1617 + *
1618 + *  crypt_header_skip
1619 + *  -----------------
1620 + *  The number of bytes of the source_buf that are skipped over before
1621 + *  the cryptographic operation begins.  This must be a number between 0
1622 + *  and 2^16-1.  For IPsec, this number will always be 8 bytes larger
1623 + *  than the mac_header_skip (to skip over the ESP header).
1624 + *  *** Value ignored if cryptography not used ***
1625 + *
1626 + */
1627 +struct hifn_operand {
1628 +       union {
1629 +               struct sk_buff *skb;
1630 +               struct uio *io;
1631 +               unsigned char *buf;
1632 +       } u;
1633 +       void            *map;
1634 +       bus_size_t      mapsize;
1635 +       int             nsegs;
1636 +       struct {
1637 +           dma_addr_t  ds_addr;
1638 +           int         ds_len;
1639 +       } segs[MAX_SCATTER];
1640 +};
1641 +
1642 +struct hifn_command {
1643 +       u_int16_t session_num;
1644 +       u_int16_t base_masks, cry_masks, mac_masks;
1645 +       u_int8_t iv[HIFN_MAX_IV_LENGTH], *ck, mac[HIFN_MAC_KEY_LENGTH];
1646 +       int cklen;
1647 +       int sloplen, slopidx;
1648 +
1649 +       struct hifn_operand src;
1650 +       struct hifn_operand dst;
1651 +
1652 +       struct hifn_softc *softc;
1653 +       struct cryptop *crp;
1654 +       struct cryptodesc *enccrd, *maccrd;
1655 +};
1656 +
1657 +#define        src_skb         src.u.skb
1658 +#define        src_io          src.u.io
1659 +#define        src_map         src.map
1660 +#define        src_mapsize     src.mapsize
1661 +#define        src_segs        src.segs
1662 +#define        src_nsegs       src.nsegs
1663 +#define        src_buf         src.u.buf
1664 +
1665 +#define        dst_skb         dst.u.skb
1666 +#define        dst_io          dst.u.io
1667 +#define        dst_map         dst.map
1668 +#define        dst_mapsize     dst.mapsize
1669 +#define        dst_segs        dst.segs
1670 +#define        dst_nsegs       dst.nsegs
1671 +#define        dst_buf         dst.u.buf
1672 +
1673 +/*
1674 + *  Return values for hifn_crypto()
1675 + */
1676 +#define HIFN_CRYPTO_SUCCESS    0
1677 +#define HIFN_CRYPTO_BAD_INPUT  (-1)
1678 +#define HIFN_CRYPTO_RINGS_FULL (-2)
1679 +
1680 +/**************************************************************************
1681 + *
1682 + *  Function:  hifn_crypto
1683 + *
1684 + *  Purpose:   Called by external drivers to begin an encryption on the
1685 + *             HIFN board.
1686 + *
1687 + *  Blocking/Non-blocking Issues
1688 + *  ============================
1689 + *  The driver cannot block in hifn_crypto (no calls to tsleep) currently.
1690 + *  hifn_crypto() returns HIFN_CRYPTO_RINGS_FULL if there is not enough
1691 + *  room in any of the rings for the request to proceed.
1692 + *
1693 + *  Return Values
1694 + *  =============
1695 + *  0 for success, negative values on error
1696 + *
1697 + *  Defines for negative error codes are:
1698 + *  
1699 + *    HIFN_CRYPTO_BAD_INPUT  :  The passed in command had invalid settings.
1700 + *    HIFN_CRYPTO_RINGS_FULL :  All DMA rings were full and non-blocking
1701 + *                              behaviour was requested.
1702 + *
1703 + *************************************************************************/
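/*
 * Illustrative caller sketch (sc, cmd, crp and hint are placeholder names;
 * only the macros and the hifn_crypto() prototype come from this driver):
 */
int err = hifn_crypto(sc, cmd, crp, hint);

if (err == HIFN_CRYPTO_RINGS_FULL) {
	/* all DMA rings are full; defer and resubmit the request later */
} else if (err == HIFN_CRYPTO_BAD_INPUT) {
	/* the command had invalid settings; fail the request */
}
/* HIFN_CRYPTO_SUCCESS (0) means the command was queued to the board */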
1704 +
1705 +/*
1706 + * Convert back and forth from 'sid' to 'card' and 'session'
1707 + */
1708 +#define HIFN_CARD(sid)         (((sid) & 0xf0000000) >> 28)
1709 +#define HIFN_SESSION(sid)      ((sid) & 0x000007ff)
1710 +#define HIFN_SID(crd,ses)      (((crd) << 28) | ((ses) & 0x7ff))
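/*
 * Worked example of the packing (values chosen for illustration):
 *   HIFN_SID(2, 5)            == 0x20000005
 *   HIFN_CARD(0x20000005)     == 2
 *   HIFN_SESSION(0x20000005)  == 5
 */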
1711 +
1712 +#endif /* __KERNEL__ */
1713 +
1714 +struct hifn_stats {
1715 +       u_int64_t hst_ibytes;
1716 +       u_int64_t hst_obytes;
1717 +       u_int32_t hst_ipackets;
1718 +       u_int32_t hst_opackets;
1719 +       u_int32_t hst_invalid;
1720 +       u_int32_t hst_nomem;            /* malloc or one of hst_nomem_* */
1721 +       u_int32_t hst_abort;
1722 +       u_int32_t hst_noirq;            /* IRQ for no reason */
1723 +       u_int32_t hst_totbatch;         /* ops submitted w/o interrupt */
1724 +       u_int32_t hst_maxbatch;         /* max ops submitted together */
1725 +       u_int32_t hst_unaligned;        /* unaligned src caused copy */
1726 +       /*
1727 +        * The following divides hst_nomem into more specific buckets.
1728 +        */
1729 +       u_int32_t hst_nomem_map;        /* bus_dmamap_create failed */
1730 +       u_int32_t hst_nomem_load;       /* bus_dmamap_load_* failed */
1731 +       u_int32_t hst_nomem_mbuf;       /* MGET* failed */
1732 +       u_int32_t hst_nomem_mcl;        /* MCLGET* failed */
1733 +       u_int32_t hst_nomem_cr;         /* out of command/result descriptor */
1734 +       u_int32_t hst_nomem_sd;         /* out of src/dst descriptors */
1735 +};
1736 +
1737 +#endif /* __HIFN7751VAR_H__ */
1738 --- /dev/null   2007-07-04 13:54:27.000000000 +1000
1739 +++ linux/crypto/ocf/hifn/hifn7751.c    2008-02-14 14:59:01.000000000 +1000
1740 @@ -0,0 +1,2970 @@
1741 +/*     $OpenBSD: hifn7751.c,v 1.120 2002/05/17 00:33:34 deraadt Exp $  */
1742 +
1743 +/*-
1744 + * Invertex AEON / Hifn 7751 driver
1745 + * Copyright (c) 1999 Invertex Inc. All rights reserved.
1746 + * Copyright (c) 1999 Theo de Raadt
1747 + * Copyright (c) 2000-2001 Network Security Technologies, Inc.
1748 + *                     http://www.netsec.net
1749 + * Copyright (c) 2003 Hifn Inc.
1750 + *
1751 + * This driver is based on a previous driver by Invertex, for which they
1752 + * requested:  Please send any comments, feedback, bug-fixes, or feature
1753 + * requests to software@invertex.com.
1754 + *
1755 + * Redistribution and use in source and binary forms, with or without
1756 + * modification, are permitted provided that the following conditions
1757 + * are met:
1758 + *
1759 + * 1. Redistributions of source code must retain the above copyright
1760 + *   notice, this list of conditions and the following disclaimer.
1761 + * 2. Redistributions in binary form must reproduce the above copyright
1762 + *   notice, this list of conditions and the following disclaimer in the
1763 + *   documentation and/or other materials provided with the distribution.
1764 + * 3. The name of the author may not be used to endorse or promote products
1765 + *   derived from this software without specific prior written permission.
1766 + *
1767 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
1768 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
1769 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
1770 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
1771 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
1772 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
1773 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
1774 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1775 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
1776 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1777 + *
1778 + * Effort sponsored in part by the Defense Advanced Research Projects
1779 + * Agency (DARPA) and Air Force Research Laboratory, Air Force
1780 + * Materiel Command, USAF, under agreement number F30602-01-2-0537.
1781 + *
1782 + *
1783 +__FBSDID("$FreeBSD: src/sys/dev/hifn/hifn7751.c,v 1.40 2007/03/21 03:42:49 sam Exp $");
1784 + */
1785 +
1786 +/*
1787 + * Driver for various Hifn encryption processors.
1788 + */
1789 +#ifndef AUTOCONF_INCLUDED
1790 +#include <linux/config.h>
1791 +#endif
1792 +#include <linux/module.h>
1793 +#include <linux/init.h>
1794 +#include <linux/list.h>
1795 +#include <linux/slab.h>
1796 +#include <linux/wait.h>
1797 +#include <linux/sched.h>
1798 +#include <linux/pci.h>
1799 +#include <linux/delay.h>
1800 +#include <linux/interrupt.h>
1801 +#include <linux/spinlock.h>
1802 +#include <linux/random.h>
1803 +#include <linux/version.h>
1804 +#include <linux/skbuff.h>
1805 +#include <asm/io.h>
1806 +
1807 +#include <cryptodev.h>
1808 +#include <uio.h>
1809 +#include <hifn/hifn7751reg.h>
1810 +#include <hifn/hifn7751var.h>
1811 +
1812 +#if 1
1813 +#define        DPRINTF(a...)   if (hifn_debug) { \
1814 +                                                       printk("%s: ", sc ? \
1815 +                                                               device_get_nameunit(sc->sc_dev) : "hifn"); \
1816 +                                                       printk(a); \
1817 +                                               } else
1818 +#else
1819 +#define        DPRINTF(a...)
1820 +#endif
1821 +
1822 +static inline int
1823 +pci_get_revid(struct pci_dev *dev)
1824 +{
1825 +       u8 rid = 0;
1826 +       pci_read_config_byte(dev, PCI_REVISION_ID, &rid);
1827 +       return rid;
1828 +}
1829 +
1830 +static struct hifn_stats hifnstats;
1831 +
1832 +#define        debug hifn_debug
1833 +int hifn_debug = 0;
1834 +module_param(hifn_debug, int, 0644);
1835 +MODULE_PARM_DESC(hifn_debug, "Enable debug");
1836 +
1837 +int hifn_maxbatch = 1;
1838 +module_param(hifn_maxbatch, int, 0644);
1839 +MODULE_PARM_DESC(hifn_maxbatch, "max ops to batch w/o interrupt");
1840 +
1841 +#ifdef MODULE_PARM
1842 +char *hifn_pllconfig = NULL;
1843 +MODULE_PARM(hifn_pllconfig, "s");
1844 +#else
1845 +char hifn_pllconfig[32]; /* This setting is RO after loading */
1846 +module_param_string(hifn_pllconfig, hifn_pllconfig, 32, 0444);
1847 +#endif
1848 +MODULE_PARM_DESC(hifn_pllconfig, "PLL config, i.e., pci66, ext33, ...");
1849 +
1850 +#ifdef HIFN_VULCANDEV
1851 +#include <sys/conf.h>
1852 +#include <sys/uio.h>
1853 +
1854 +static struct cdevsw vulcanpk_cdevsw; /* forward declaration */
1855 +#endif
1856 +
1857 +/*
1858 + * Prototypes and count for the pci_device structure
1859 + */
1860 +static int  hifn_probe(struct pci_dev *dev, const struct pci_device_id *ent);
1861 +static void hifn_remove(struct pci_dev *dev);
1862 +
1863 +static int hifn_newsession(device_t, u_int32_t *, struct cryptoini *);
1864 +static int hifn_freesession(device_t, u_int64_t);
1865 +static int hifn_process(device_t, struct cryptop *, int);
1866 +
1867 +static device_method_t hifn_methods = {
1868 +       /* crypto device methods */
1869 +       DEVMETHOD(cryptodev_newsession, hifn_newsession),
1870 +       DEVMETHOD(cryptodev_freesession,hifn_freesession),
1871 +       DEVMETHOD(cryptodev_process,    hifn_process),
1872 +};
1873 +
1874 +static void hifn_reset_board(struct hifn_softc *, int);
1875 +static void hifn_reset_puc(struct hifn_softc *);
1876 +static void hifn_puc_wait(struct hifn_softc *);
1877 +static int hifn_enable_crypto(struct hifn_softc *);
1878 +static void hifn_set_retry(struct hifn_softc *sc);
1879 +static void hifn_init_dma(struct hifn_softc *);
1880 +static void hifn_init_pci_registers(struct hifn_softc *);
1881 +static int hifn_sramsize(struct hifn_softc *);
1882 +static int hifn_dramsize(struct hifn_softc *);
1883 +static int hifn_ramtype(struct hifn_softc *);
1884 +static void hifn_sessions(struct hifn_softc *);
1885 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
1886 +static irqreturn_t hifn_intr(int irq, void *arg);
1887 +#else
1888 +static irqreturn_t hifn_intr(int irq, void *arg, struct pt_regs *regs);
1889 +#endif
1890 +static u_int hifn_write_command(struct hifn_command *, u_int8_t *);
1891 +static u_int32_t hifn_next_signature(u_int32_t a, u_int cnt);
1892 +static void hifn_callback(struct hifn_softc *, struct hifn_command *, u_int8_t *);
1893 +static int hifn_crypto(struct hifn_softc *, struct hifn_command *, struct cryptop *, int);
1894 +static int hifn_readramaddr(struct hifn_softc *, int, u_int8_t *);
1895 +static int hifn_writeramaddr(struct hifn_softc *, int, u_int8_t *);
1896 +static int hifn_dmamap_load_src(struct hifn_softc *, struct hifn_command *);
1897 +static int hifn_dmamap_load_dst(struct hifn_softc *, struct hifn_command *);
1898 +static int hifn_init_pubrng(struct hifn_softc *);
1899 +static void hifn_tick(unsigned long arg);
1900 +static void hifn_abort(struct hifn_softc *);
1901 +static void hifn_alloc_slot(struct hifn_softc *, int *, int *, int *, int *);
1902 +
1903 +static void hifn_write_reg_0(struct hifn_softc *, bus_size_t, u_int32_t);
1904 +static void hifn_write_reg_1(struct hifn_softc *, bus_size_t, u_int32_t);
1905 +
1906 +#ifdef CONFIG_OCF_RANDOMHARVEST
1907 +static int hifn_read_random(void *arg, u_int32_t *buf, int len);
1908 +#endif
1909 +
1910 +#define HIFN_MAX_CHIPS 8
1911 +static struct hifn_softc *hifn_chip_idx[HIFN_MAX_CHIPS];
1912 +
1913 +static __inline u_int32_t
1914 +READ_REG_0(struct hifn_softc *sc, bus_size_t reg)
1915 +{
1916 +       u_int32_t v = readl(sc->sc_bar0 + reg);
1917 +       sc->sc_bar0_lastreg = (bus_size_t) -1;
1918 +       return (v);
1919 +}
1920 +#define        WRITE_REG_0(sc, reg, val)       hifn_write_reg_0(sc, reg, val)
1921 +
1922 +static __inline u_int32_t
1923 +READ_REG_1(struct hifn_softc *sc, bus_size_t reg)
1924 +{
1925 +       u_int32_t v = readl(sc->sc_bar1 + reg);
1926 +       sc->sc_bar1_lastreg = (bus_size_t) -1;
1927 +       return (v);
1928 +}
1929 +#define        WRITE_REG_1(sc, reg, val)       hifn_write_reg_1(sc, reg, val)
1930 +
1931 +/*
1932 + * map in a given buffer (great on some arches :-)
1933 + */
1934 +
1935 +static int
1936 +pci_map_uio(struct hifn_softc *sc, struct hifn_operand *buf, struct uio *uio)
1937 +{
1938 +       struct iovec *iov = uio->uio_iov;
1939 +
1940 +       DPRINTF("%s()\n", __FUNCTION__);
1941 +
1942 +       buf->mapsize = 0;
1943 +       for (buf->nsegs = 0; buf->nsegs < uio->uio_iovcnt; ) {
1944 +               buf->segs[buf->nsegs].ds_addr = pci_map_single(sc->sc_pcidev,
1945 +                               iov->iov_base, iov->iov_len,
1946 +                               PCI_DMA_BIDIRECTIONAL);
1947 +               buf->segs[buf->nsegs].ds_len = iov->iov_len;
1948 +               buf->mapsize += iov->iov_len;
1949 +               iov++;
1950 +               buf->nsegs++;
1951 +       }
1952 +       /* identify this buffer by the first segment */
1953 +       buf->map = (void *) buf->segs[0].ds_addr;
1954 +       return(0);
1955 +}
1956 +
1957 +/*
1958 + * map in a given sk_buff
1959 + */
1960 +
1961 +static int
1962 +pci_map_skb(struct hifn_softc *sc,struct hifn_operand *buf,struct sk_buff *skb)
1963 +{
1964 +       int i;
1965 +
1966 +       DPRINTF("%s()\n", __FUNCTION__);
1967 +
1968 +       buf->mapsize = 0;
1969 +
1970 +       buf->segs[0].ds_addr = pci_map_single(sc->sc_pcidev,
1971 +                       skb->data, skb_headlen(skb), PCI_DMA_BIDIRECTIONAL);
1972 +       buf->segs[0].ds_len = skb_headlen(skb);
1973 +       buf->mapsize += buf->segs[0].ds_len;
1974 +
1975 +       buf->nsegs = 1;
1976 +
1977 +       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1978 +               buf->segs[buf->nsegs].ds_len = skb_shinfo(skb)->frags[i].size;
1979 +               buf->segs[buf->nsegs].ds_addr = pci_map_single(sc->sc_pcidev,
1980 +                               page_address(skb_shinfo(skb)->frags[i].page) +
1981 +                                       skb_shinfo(skb)->frags[i].page_offset,
1982 +                               buf->segs[buf->nsegs].ds_len, PCI_DMA_BIDIRECTIONAL);
1983 +               buf->mapsize += buf->segs[buf->nsegs].ds_len;
1984 +               buf->nsegs++;
1985 +       }
1986 +
1987 +       /* identify this buffer by the first segment */
1988 +       buf->map = (void *) buf->segs[0].ds_addr;
1989 +       return(0);
1990 +}
1991 +
1992 +/*
1993 + * map in a given contiguous buffer
1994 + */
1995 +
1996 +static int
1997 +pci_map_buf(struct hifn_softc *sc,struct hifn_operand *buf, void *b, int len)
1998 +{
1999 +       DPRINTF("%s()\n", __FUNCTION__);
2000 +
2001 +       buf->mapsize = 0;
2002 +       buf->segs[0].ds_addr = pci_map_single(sc->sc_pcidev,
2003 +                       b, len, PCI_DMA_BIDIRECTIONAL);
2004 +       buf->segs[0].ds_len = len;
2005 +       buf->mapsize += buf->segs[0].ds_len;
2006 +       buf->nsegs = 1;
2007 +
2008 +       /* identify this buffer by the first segment */
2009 +       buf->map = (void *) buf->segs[0].ds_addr;
2010 +       return(0);
2011 +}
2012 +
2013 +#if 0 /* not needed at this time */
2014 +static void
2015 +pci_sync_iov(struct hifn_softc *sc, struct hifn_operand *buf)
2016 +{
2017 +       int i;
2018 +
2019 +       DPRINTF("%s()\n", __FUNCTION__);
2020 +       for (i = 0; i < buf->nsegs; i++)
2021 +               pci_dma_sync_single_for_cpu(sc->sc_pcidev, buf->segs[i].ds_addr,
2022 +                               buf->segs[i].ds_len, PCI_DMA_BIDIRECTIONAL);
2023 +}
2024 +#endif
2025 +
2026 +static void
2027 +pci_unmap_buf(struct hifn_softc *sc, struct hifn_operand *buf)
2028 +{
2029 +       int i;
2030 +       DPRINTF("%s()\n", __FUNCTION__);
2031 +       for (i = 0; i < buf->nsegs; i++) {
2032 +               pci_unmap_single(sc->sc_pcidev, buf->segs[i].ds_addr,
2033 +                               buf->segs[i].ds_len, PCI_DMA_BIDIRECTIONAL);
2034 +               buf->segs[i].ds_addr = 0;
2035 +               buf->segs[i].ds_len = 0;
2036 +       }
2037 +       buf->nsegs = 0;
2038 +       buf->mapsize = 0;
2039 +       buf->map = 0;
2040 +}
2041 +
2042 +static const char*
2043 +hifn_partname(struct hifn_softc *sc)
2044 +{
2045 +       /* XXX sprintf numbers when not decoded */
2046 +       switch (pci_get_vendor(sc->sc_pcidev)) {
2047 +       case PCI_VENDOR_HIFN:
2048 +               switch (pci_get_device(sc->sc_pcidev)) {
2049 +               case PCI_PRODUCT_HIFN_6500:     return "Hifn 6500";
2050 +               case PCI_PRODUCT_HIFN_7751:     return "Hifn 7751";
2051 +               case PCI_PRODUCT_HIFN_7811:     return "Hifn 7811";
2052 +               case PCI_PRODUCT_HIFN_7951:     return "Hifn 7951";
2053 +               case PCI_PRODUCT_HIFN_7955:     return "Hifn 7955";
2054 +               case PCI_PRODUCT_HIFN_7956:     return "Hifn 7956";
2055 +               }
2056 +               return "Hifn unknown-part";
2057 +       case PCI_VENDOR_INVERTEX:
2058 +               switch (pci_get_device(sc->sc_pcidev)) {
2059 +               case PCI_PRODUCT_INVERTEX_AEON: return "Invertex AEON";
2060 +               }
2061 +               return "Invertex unknown-part";
2062 +       case PCI_VENDOR_NETSEC:
2063 +               switch (pci_get_device(sc->sc_pcidev)) {
2064 +               case PCI_PRODUCT_NETSEC_7751:   return "NetSec 7751";
2065 +               }
2066 +               return "NetSec unknown-part";
2067 +       }
2068 +       return "Unknown-vendor unknown-part";
2069 +}
2070 +
2071 +static u_int
2072 +checkmaxmin(struct pci_dev *dev, const char *what, u_int v, u_int min, u_int max)
2073 +{
2074 +       struct hifn_softc *sc = pci_get_drvdata(dev);
2075 +       if (v > max) {
2076 +               device_printf(sc->sc_dev, "Warning, %s %u out of range, "
2077 +                       "using max %u\n", what, v, max);
2078 +               v = max;
2079 +       } else if (v < min) {
2080 +               device_printf(sc->sc_dev, "Warning, %s %u out of range, "
2081 +                       "using min %u\n", what, v, min);
2082 +               v = min;
2083 +       }
2084 +       return v;
2085 +}
2086 +
2087 +/*
2088 + * Select PLL configuration for 795x parts.  This is complicated in
2089 + * that we cannot determine the optimal parameters without user input.
2090 + * The reference clock is derived from an external clock through a
2091 + * multiplier.  The external clock is either the host bus (i.e. PCI)
2092 + * or an external clock generator.  When using the PCI bus we assume
2093 + * the clock is either 33 or 66 MHz; for an external source we cannot
2094 + * tell the speed.
2095 + *
2096 + * PLL configuration is done with a string: "pci" for PCI bus, or "ext"
2097 + * for an external source, followed by the frequency.  We calculate
2098 + * the appropriate multiplier and PLL register contents accordingly.
2099 + * When no configuration is given we default to "ext66" (see below).
2100 + * If a card is configured to use the PCI bus clock but sits in a
2101 + * 33MHz slot, it will operate at half speed until the correct
2102 + * information is provided.
2103 + *
2104 + * We use a default setting of "ext66" because according to Mike Ham
2105 + * of HiFn, almost every board in existence has an external crystal
2106 + * populated at 66MHz. Using PCI can be a problem on modern motherboards,
2107 + * because PCI33 can have clocks from 0 to 33MHz, and some have
2108 + * non-PCI-compliant spread-spectrum clocks, which can confuse the PLL.
2109 + */
2110 +static void
2111 +hifn_getpllconfig(struct pci_dev *dev, u_int *pll)
2112 +{
2113 +       const char *pllspec = hifn_pllconfig;
2114 +       u_int freq, mul, fl, fh;
2115 +       u_int32_t pllconfig;
2116 +       char *nxt;
2117 +
2118 +       if (pllspec == NULL)
2119 +               pllspec = "ext66";
2120 +       fl = 33, fh = 66;
2121 +       pllconfig = 0;
2122 +       if (strncmp(pllspec, "ext", 3) == 0) {
2123 +               pllspec += 3;
2124 +               pllconfig |= HIFN_PLL_REF_SEL;
2125 +               switch (pci_get_device(dev)) {
2126 +               case PCI_PRODUCT_HIFN_7955:
2127 +               case PCI_PRODUCT_HIFN_7956:
2128 +                       fl = 20, fh = 100;
2129 +                       break;
2130 +#ifdef notyet
2131 +               case PCI_PRODUCT_HIFN_7954:
2132 +                       fl = 20, fh = 66;
2133 +                       break;
2134 +#endif
2135 +               }
2136 +       } else if (strncmp(pllspec, "pci", 3) == 0)
2137 +               pllspec += 3;
2138 +       freq = strtoul(pllspec, &nxt, 10);
2139 +       if (nxt == pllspec)
2140 +               freq = 66;
2141 +       else
2142 +               freq = checkmaxmin(dev, "frequency", freq, fl, fh);
2143 +       /*
2144 +        * Calculate multiplier.  We target a Fck of 266 MHz,
2145 +        * allowing only even values, possibly rounded down.
2146 +        * Multipliers > 8 must set the charge pump current.
2147 +        */
2148 +       mul = checkmaxmin(dev, "PLL divisor", (266 / freq) &~ 1, 2, 12);
2149 +       pllconfig |= (mul / 2 - 1) << HIFN_PLL_ND_SHIFT;
2150 +       if (mul > 8)
2151 +               pllconfig |= HIFN_PLL_IS;
2152 +       *pll = pllconfig;
2153 +}
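/*
 * Worked example, derived from the code above, for the default "ext66"
 * setting: HIFN_PLL_REF_SEL is set and freq = 66, so
 * mul = (266 / 66) & ~1 = 4, giving (4/2 - 1) << HIFN_PLL_ND_SHIFT,
 * i.e. HIFN_PLL_ND_4; mul <= 8 leaves HIFN_PLL_IS clear, so the final
 * value is HIFN_PLL_REF_SEL | HIFN_PLL_ND_4 (0x801).
 */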
2154 +
2155 +/*
2156 + * Attach an interface that successfully probed.
2157 + */
2158 +static int
2159 +hifn_probe(struct pci_dev *dev, const struct pci_device_id *ent)
2160 +{
2161 +       struct hifn_softc *sc = NULL;
2162 +       char rbase;
2163 +       u_int16_t ena, rev;
2164 +       int rseg, rc;
2165 +       unsigned long mem_start, mem_len;
2166 +       static int num_chips = 0;
2167 +
2168 +       DPRINTF("%s()\n", __FUNCTION__);
2169 +
2170 +       if (pci_enable_device(dev) < 0)
2171 +               return(-ENODEV);
2172 +
2173 +       if (pci_set_mwi(dev))
2174 +               return(-ENODEV);
2175 +
2176 +       if (!dev->irq) {
2177 +               printk("hifn: found device with no IRQ assigned. check BIOS settings!\n");
2178 +               pci_disable_device(dev);
2179 +               return(-ENODEV);
2180 +       }
2181 +
2182 +       sc = (struct hifn_softc *) kmalloc(sizeof(*sc), GFP_KERNEL);
2183 +       if (!sc)
2184 +               return(-ENOMEM);
2185 +       memset(sc, 0, sizeof(*sc));
2186 +
2187 +       softc_device_init(sc, "hifn", num_chips, hifn_methods);
2188 +
2189 +       sc->sc_pcidev = dev;
2190 +       sc->sc_irq = -1;
2191 +       sc->sc_cid = -1;
2192 +       sc->sc_num = num_chips++;
2193 +       if (sc->sc_num < HIFN_MAX_CHIPS)
2194 +               hifn_chip_idx[sc->sc_num] = sc;
2195 +
2196 +       pci_set_drvdata(sc->sc_pcidev, sc);
2197 +
2198 +       spin_lock_init(&sc->sc_mtx);
2199 +
2200 +       /* XXX handle power management */
2201 +
2202 +       /*
2203 +        * The 7951 and 795x have a random number generator and
2204 +        * public key support; note this.
2205 +        */
2206 +       if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
2207 +           (pci_get_device(dev) == PCI_PRODUCT_HIFN_7951 ||
2208 +            pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
2209 +            pci_get_device(dev) == PCI_PRODUCT_HIFN_7956))
2210 +               sc->sc_flags = HIFN_HAS_RNG | HIFN_HAS_PUBLIC;
2211 +       /*
2212 +        * The 7811 has a random number generator and
2213 +        * we also note its identity because of some quirks.
2214 +        */
2215 +       if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
2216 +           pci_get_device(dev) == PCI_PRODUCT_HIFN_7811)
2217 +               sc->sc_flags |= HIFN_IS_7811 | HIFN_HAS_RNG;
2218 +
2219 +       /*
2220 +        * The 795x parts support AES.
2221 +        */
2222 +       if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
2223 +           (pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
2224 +            pci_get_device(dev) == PCI_PRODUCT_HIFN_7956)) {
2225 +               sc->sc_flags |= HIFN_IS_7956 | HIFN_HAS_AES;
2226 +               /*
2227 +                * Select PLL configuration.  This depends on the
2228 +                * bus and board design and must be manually configured
2229 +                * if the default setting is unacceptable.
2230 +                */
2231 +               hifn_getpllconfig(dev, &sc->sc_pllconfig);
2232 +       }
2233 +
2234 +       /*
2235 +        * Set up PCI resources.  Note that we record the bus
2236 +        * tag and handle for each register mapping; this is
2237 +        * used by the READ_REG_0, WRITE_REG_0, READ_REG_1,
2238 +        * and WRITE_REG_1 macros throughout the driver.
2239 +        */
2240 +       mem_start = pci_resource_start(sc->sc_pcidev, 0);
2241 +       mem_len   = pci_resource_len(sc->sc_pcidev, 0);
2242 +       sc->sc_bar0 = (ocf_iomem_t) ioremap(mem_start, mem_len);
2243 +       if (!sc->sc_bar0) {
2244 +               device_printf(sc->sc_dev, "cannot map bar%d register space\n", 0);
2245 +               goto fail;
2246 +       }
2247 +       sc->sc_bar0_lastreg = (bus_size_t) -1;
2248 +
2249 +       mem_start = pci_resource_start(sc->sc_pcidev, 1);
2250 +       mem_len   = pci_resource_len(sc->sc_pcidev, 1);
2251 +       sc->sc_bar1 = (ocf_iomem_t) ioremap(mem_start, mem_len);
2252 +       if (!sc->sc_bar1) {
2253 +               device_printf(sc->sc_dev, "cannot map bar%d register space\n", 1);
2254 +               goto fail;
2255 +       }
2256 +       sc->sc_bar1_lastreg = (bus_size_t) -1;
2257 +
2258 +       /* fix up the bus size */
2259 +       if (pci_set_dma_mask(dev, DMA_32BIT_MASK)) {
2260 +               device_printf(sc->sc_dev, "No usable DMA configuration, aborting.\n");
2261 +               goto fail;
2262 +       }
2263 +       if (pci_set_consistent_dma_mask(dev, DMA_32BIT_MASK)) {
2264 +               device_printf(sc->sc_dev,
2265 +                               "No usable consistent DMA configuration, aborting.\n");
2266 +               goto fail;
2267 +       }
2268 +
2269 +       hifn_set_retry(sc);
2270 +
2271 +       /*
2272 +        * Set up the area where the Hifn DMA descriptors
2273 +        * and associated data structures will live.
2274 +        */
2275 +       sc->sc_dma = (struct hifn_dma *) pci_alloc_consistent(dev,
2276 +                       sizeof(*sc->sc_dma),
2277 +                       &sc->sc_dma_physaddr);
2278 +       if (!sc->sc_dma) {
2279 +               device_printf(sc->sc_dev, "cannot alloc sc_dma\n");
2280 +               goto fail;
2281 +       }
2282 +       bzero(sc->sc_dma, sizeof(*sc->sc_dma));
2283 +
2284 +       /*
2285 +        * Reset the board and do the ``secret handshake''
2286 +        * to enable the crypto support.  Then complete the
2287 +        * initialization procedure by setting up the interrupt
2288 +        * and hooking in to the system crypto support so we'll
2289 +        * get used for system services like the crypto device,
2290 +        * IPsec, RNG device, etc.
2291 +        */
2292 +       hifn_reset_board(sc, 0);
2293 +
2294 +       if (hifn_enable_crypto(sc) != 0) {
2295 +               device_printf(sc->sc_dev, "crypto enabling failed\n");
2296 +               goto fail;
2297 +       }
2298 +       hifn_reset_puc(sc);
2299 +
2300 +       hifn_init_dma(sc);
2301 +       hifn_init_pci_registers(sc);
2302 +
2303 +       pci_set_master(sc->sc_pcidev);
2304 +
2305 +       /* XXX can't dynamically determine ram type for 795x; force dram */
2306 +       if (sc->sc_flags & HIFN_IS_7956)
2307 +               sc->sc_drammodel = 1;
2308 +       else if (hifn_ramtype(sc))
2309 +               goto fail;
2310 +
2311 +       if (sc->sc_drammodel == 0)
2312 +               hifn_sramsize(sc);
2313 +       else
2314 +               hifn_dramsize(sc);
2315 +
2316 +       /*
2317 +        * Workaround for NetSec 7751 rev A: half ram size because two
2318 +        * of the address lines were left floating
2319 +        */
2320 +       if (pci_get_vendor(dev) == PCI_VENDOR_NETSEC &&
2321 +           pci_get_device(dev) == PCI_PRODUCT_NETSEC_7751 &&
2322 +           pci_get_revid(dev) == 0x61) /*XXX???*/
2323 +               sc->sc_ramsize >>= 1;
2324 +
2325 +       /*
2326 +        * Arrange the interrupt line.
2327 +        */
2328 +       rc = request_irq(dev->irq, hifn_intr, IRQF_SHARED, "hifn", sc);
2329 +       if (rc) {
2330 +               device_printf(sc->sc_dev, "could not map interrupt: %d\n", rc);
2331 +               goto fail;
2332 +       }
2333 +       sc->sc_irq = dev->irq;
2334 +
2335 +       hifn_sessions(sc);
2336 +
2337 +       /*
2338 +        * NB: Keep only the low 16 bits; this masks the chip id
2339 +        *     from the 7951.
2340 +        */
2341 +       rev = READ_REG_1(sc, HIFN_1_REVID) & 0xffff;
2342 +
2343 +       rseg = sc->sc_ramsize / 1024;
2344 +       rbase = 'K';
2345 +       if (sc->sc_ramsize >= (1024 * 1024)) {
2346 +               rbase = 'M';
2347 +               rseg /= 1024;
2348 +       }
2349 +       device_printf(sc->sc_dev, "%s, rev %u, %d%cB %cram",
2350 +               hifn_partname(sc), rev,
2351 +               rseg, rbase, sc->sc_drammodel ? 'd' : 's');
2352 +       if (sc->sc_flags & HIFN_IS_7956)
2353 +               printf(", pll=0x%x<%s clk, %ux mult>",
2354 +                       sc->sc_pllconfig,
2355 +                       sc->sc_pllconfig & HIFN_PLL_REF_SEL ? "ext" : "pci",
2356 +                       2 + 2*((sc->sc_pllconfig & HIFN_PLL_ND) >> 11));
2357 +       printf("\n");
2358 +
2359 +       sc->sc_cid = crypto_get_driverid(softc_get_device(sc),CRYPTOCAP_F_HARDWARE);
2360 +       if (sc->sc_cid < 0) {
2361 +               device_printf(sc->sc_dev, "could not get crypto driver id\n");
2362 +               goto fail;
2363 +       }
2364 +
2365 +       WRITE_REG_0(sc, HIFN_0_PUCNFG,
2366 +           READ_REG_0(sc, HIFN_0_PUCNFG) | HIFN_PUCNFG_CHIPID);
2367 +       ena = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
2368 +
2369 +       switch (ena) {
2370 +       case HIFN_PUSTAT_ENA_2:
2371 +               crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
2372 +               crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0);
2373 +               if (sc->sc_flags & HIFN_HAS_AES)
2374 +                       crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
2375 +               /*FALLTHROUGH*/
2376 +       case HIFN_PUSTAT_ENA_1:
2377 +               crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0);
2378 +               crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0);
2379 +               crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
2380 +               crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
2381 +               crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
2382 +               break;
2383 +       }
2384 +
2385 +       if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG))
2386 +               hifn_init_pubrng(sc);
2387 +
2388 +       init_timer(&sc->sc_tickto);
2389 +       sc->sc_tickto.function = hifn_tick;
2390 +       sc->sc_tickto.data = (unsigned long) sc->sc_num;
2391 +       mod_timer(&sc->sc_tickto, jiffies + HZ);
2392 +
2393 +       return (0);
2394 +
2395 +fail:
2396 +    if (sc->sc_cid >= 0)
2397 +        crypto_unregister_all(sc->sc_cid);
2398 +    if (sc->sc_irq != -1)
2399 +        free_irq(sc->sc_irq, sc);
2400 +    if (sc->sc_dma) {
2401 +               /* Turn off DMA polling */
2402 +               WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
2403 +                       HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
2404 +
2405 +        pci_free_consistent(sc->sc_pcidev,
2406 +                               sizeof(*sc->sc_dma),
2407 +                sc->sc_dma, sc->sc_dma_physaddr);
2408 +       }
2409 +    kfree(sc);
2410 +       return (-ENXIO);
2411 +}
2412 +
2413 +/*
2414 + * Detach an interface that successfully probed.
2415 + */
2416 +static void
2417 +hifn_remove(struct pci_dev *dev)
2418 +{
2419 +       struct hifn_softc *sc = pci_get_drvdata(dev);
2420 +       unsigned long l_flags;
2421 +
2422 +       DPRINTF("%s()\n", __FUNCTION__);
2423 +
2424 +       KASSERT(sc != NULL, ("hifn_detach: null software carrier!"));
2425 +
2426 +       /* disable interrupts */
2427 +       HIFN_LOCK(sc);
2428 +       WRITE_REG_1(sc, HIFN_1_DMA_IER, 0);
2429 +       HIFN_UNLOCK(sc);
2430 +
2431 +       /*XXX other resources */
2432 +       del_timer_sync(&sc->sc_tickto);
2433 +
2434 +       /* Turn off DMA polling */
2435 +       WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
2436 +           HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
2437 +
2438 +       crypto_unregister_all(sc->sc_cid);
2439 +
2440 +       free_irq(sc->sc_irq, sc);
2441 +
2442 +       pci_free_consistent(sc->sc_pcidev, sizeof(*sc->sc_dma),
2443 +                sc->sc_dma, sc->sc_dma_physaddr);
2444 +}
2445 +
2446 +
2447 +static int
2448 +hifn_init_pubrng(struct hifn_softc *sc)
2449 +{
2450 +       int i;
2451 +
2452 +       DPRINTF("%s()\n", __FUNCTION__);
2453 +
2454 +       if ((sc->sc_flags & HIFN_IS_7811) == 0) {
2455 +               /* Reset 7951 public key/rng engine */
2456 +               WRITE_REG_1(sc, HIFN_1_PUB_RESET,
2457 +                   READ_REG_1(sc, HIFN_1_PUB_RESET) | HIFN_PUBRST_RESET);
2458 +
2459 +               for (i = 0; i < 100; i++) {
2460 +                       DELAY(1000);
2461 +                       if ((READ_REG_1(sc, HIFN_1_PUB_RESET) &
2462 +                           HIFN_PUBRST_RESET) == 0)
2463 +                               break;
2464 +               }
2465 +
2466 +               if (i == 100) {
2467 +                       device_printf(sc->sc_dev, "public key init failed\n");
2468 +                       return (1);
2469 +               }
2470 +       }
2471 +
2472 +       /* Enable the rng, if available */
2473 +#ifdef CONFIG_OCF_RANDOMHARVEST
2474 +       if (sc->sc_flags & HIFN_HAS_RNG) {
2475 +               if (sc->sc_flags & HIFN_IS_7811) {
2476 +                       u_int32_t r;
2477 +                       r = READ_REG_1(sc, HIFN_1_7811_RNGENA);
2478 +                       if (r & HIFN_7811_RNGENA_ENA) {
2479 +                               r &= ~HIFN_7811_RNGENA_ENA;
2480 +                               WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
2481 +                       }
2482 +                       WRITE_REG_1(sc, HIFN_1_7811_RNGCFG,
2483 +                           HIFN_7811_RNGCFG_DEFL);
2484 +                       r |= HIFN_7811_RNGENA_ENA;
2485 +                       WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
2486 +               } else
2487 +                       WRITE_REG_1(sc, HIFN_1_RNG_CONFIG,
2488 +                           READ_REG_1(sc, HIFN_1_RNG_CONFIG) |
2489 +                           HIFN_RNGCFG_ENA);
2490 +
2491 +               sc->sc_rngfirst = 1;
2492 +               crypto_rregister(sc->sc_cid, hifn_read_random, sc);
2493 +       }
2494 +#endif
2495 +
2496 +       /* Enable public key engine, if available */
2497 +       if (sc->sc_flags & HIFN_HAS_PUBLIC) {
2498 +               WRITE_REG_1(sc, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE);
2499 +               sc->sc_dmaier |= HIFN_DMAIER_PUBDONE;
2500 +               WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
2501 +#ifdef HIFN_VULCANDEV
2502 +               sc->sc_pkdev = make_dev(&vulcanpk_cdevsw, 0, 
2503 +                                       UID_ROOT, GID_WHEEL, 0666,
2504 +                                       "vulcanpk");
2505 +               sc->sc_pkdev->si_drv1 = sc;
2506 +#endif
2507 +       }
2508 +
2509 +       return (0);
2510 +}
2511 +
2512 +#ifdef CONFIG_OCF_RANDOMHARVEST
2513 +static int
2514 +hifn_read_random(void *arg, u_int32_t *buf, int len)
2515 +{
2516 +       struct hifn_softc *sc = (struct hifn_softc *) arg;
2517 +       u_int32_t sts;
2518 +       int i, rc = 0;
2519 +
2520 +       if (len <= 0)
2521 +               return rc;
2522 +
2523 +       if (sc->sc_flags & HIFN_IS_7811) {
2524 +               /* ONLY VALID ON 7811!!!! */
2525 +               for (i = 0; i < 5; i++) {
2526 +                       sts = READ_REG_1(sc, HIFN_1_7811_RNGSTS);
2527 +                       if (sts & HIFN_7811_RNGSTS_UFL) {
2528 +                               device_printf(sc->sc_dev,
2529 +                                             "RNG underflow: disabling\n");
2530 +                               /* DAVIDM perhaps return -1 */
2531 +                               break;
2532 +                       }
2533 +                       if ((sts & HIFN_7811_RNGSTS_RDY) == 0)
2534 +                               break;
2535 +
2536 +                       /*
2537 +                        * There are at least two words in the RNG FIFO
2538 +                        * at this point.
2539 +                        */
2540 +                       if (rc < len)
2541 +                               buf[rc++] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
2542 +                       if (rc < len)
2543 +                               buf[rc++] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
2544 +               }
2545 +       } else
2546 +               buf[rc++] = READ_REG_1(sc, HIFN_1_RNG_DATA);
2547 +
2548 +       /* NB: discard first data read */
2549 +       if (sc->sc_rngfirst) {
2550 +               sc->sc_rngfirst = 0;
2551 +               rc = 0;
2552 +       }
2553 +
2554 +       return(rc);
2555 +}
2556 +#endif /* CONFIG_OCF_RANDOMHARVEST */
2557 +
2558 +static void
2559 +hifn_puc_wait(struct hifn_softc *sc)
2560 +{
2561 +       int i;
2562 +       int reg = HIFN_0_PUCTRL;
2563 +
2564 +       if (sc->sc_flags & HIFN_IS_7956) {
2565 +               reg = HIFN_0_PUCTRL2;
2566 +       }
2567 +
2568 +       for (i = 5000; i > 0; i--) {
2569 +               DELAY(1);
2570 +               if (!(READ_REG_0(sc, reg) & HIFN_PUCTRL_RESET))
2571 +                       break;
2572 +       }
2573 +       if (!i)
2574 +               device_printf(sc->sc_dev, "proc unit did not reset (0x%x)\n",
2575 +                               READ_REG_0(sc, HIFN_0_PUCTRL));
2576 +}
2577 +
2578 +/*
2579 + * Reset the processing unit.
2580 + */
2581 +static void
2582 +hifn_reset_puc(struct hifn_softc *sc)
2583 +{
2584 +       /* Reset processing unit */
2585 +       int reg = HIFN_0_PUCTRL;
2586 +
2587 +       if (sc->sc_flags & HIFN_IS_7956) {
2588 +               reg = HIFN_0_PUCTRL2;
2589 +       }
2590 +       WRITE_REG_0(sc, reg, HIFN_PUCTRL_DMAENA);
2591 +
2592 +       hifn_puc_wait(sc);
2593 +}
2594 +
2595 +/*
2596 + * Set the Retry and TRDY registers; note that we set them to
2597 + * zero because the 7811 locks up when forced to retry (section
2598 + * 3.6 of "Specification Update SU-0014-04".  Not clear if we
2599 + * should do this for all Hifn parts, but it doesn't seem to hurt.
2600 + */
2601 +static void
2602 +hifn_set_retry(struct hifn_softc *sc)
2603 +{
2604 +       DPRINTF("%s()\n", __FUNCTION__);
2605 +       /* NB: RETRY only responds to 8-bit reads/writes */
2606 +       pci_write_config_byte(sc->sc_pcidev, HIFN_RETRY_TIMEOUT, 0);
2607 +       pci_write_config_dword(sc->sc_pcidev, HIFN_TRDY_TIMEOUT, 0);
2608 +}
2609 +
2610 +/*
2611 + * Resets the board.  Values in the registers are left as is
2612 + * from the reset (i.e. initial values are assigned elsewhere).
2613 + */
2614 +static void
2615 +hifn_reset_board(struct hifn_softc *sc, int full)
2616 +{
2617 +       u_int32_t reg;
2618 +
2619 +       DPRINTF("%s()\n", __FUNCTION__);
2620 +       /*
2621 +        * Set polling in the DMA configuration register to zero.  0x7 avoids
2622 +        * resetting the board and zeros out the other fields.
2623 +        */
2624 +       WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
2625 +           HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
2626 +
2627 +       /*
2628 +        * Now that polling has been disabled, we have to wait 1 ms
2629 +        * before resetting the board.
2630 +        */
2631 +       DELAY(1000);
2632 +
2633 +       /* Reset the DMA unit */
2634 +       if (full) {
2635 +               WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE);
2636 +               DELAY(1000);
2637 +       } else {
2638 +               WRITE_REG_1(sc, HIFN_1_DMA_CNFG,
2639 +                   HIFN_DMACNFG_MODE | HIFN_DMACNFG_MSTRESET);
2640 +               hifn_reset_puc(sc);
2641 +       }
2642 +
2643 +       KASSERT(sc->sc_dma != NULL, ("hifn_reset_board: null DMA tag!"));
2644 +       bzero(sc->sc_dma, sizeof(*sc->sc_dma));
2645 +
2646 +       /* Bring dma unit out of reset */
2647 +       WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
2648 +           HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
2649 +
2650 +       hifn_puc_wait(sc);
2651 +       hifn_set_retry(sc);
2652 +
2653 +       if (sc->sc_flags & HIFN_IS_7811) {
2654 +               for (reg = 0; reg < 1000; reg++) {
2655 +                       if (READ_REG_1(sc, HIFN_1_7811_MIPSRST) &
2656 +                           HIFN_MIPSRST_CRAMINIT)
2657 +                               break;
2658 +                       DELAY(1000);
2659 +               }
2660 +               if (reg == 1000)
2661 +                       device_printf(sc->sc_dev, ": cram init timeout\n");
2662 +       } else {
2663 +         /* set up DMA configuration register #2 */
2664 +         /* turn off all PK and BAR0 swaps */
2665 +         WRITE_REG_1(sc, HIFN_1_DMA_CNFG2,
2666 +                     (3 << HIFN_DMACNFG2_INIT_WRITE_BURST_SHIFT)|
2667 +                     (3 << HIFN_DMACNFG2_INIT_READ_BURST_SHIFT)|
2668 +                     (2 << HIFN_DMACNFG2_TGT_WRITE_BURST_SHIFT)|
2669 +                     (2 << HIFN_DMACNFG2_TGT_READ_BURST_SHIFT));
2670 +       }
2671 +}
2672 +
2673 +static u_int32_t
2674 +hifn_next_signature(u_int32_t a, u_int cnt)
2675 +{
2676 +       int i;
2677 +       u_int32_t v;
2678 +
2679 +       for (i = 0; i < cnt; i++) {
2680 +
2681 +               /* get the parity */
2682 +               v = a & 0x80080125;
2683 +               v ^= v >> 16;
2684 +               v ^= v >> 8;
2685 +               v ^= v >> 4;
2686 +               v ^= v >> 2;
2687 +               v ^= v >> 1;
2688 +
2689 +               a = (v & 1) ^ (a << 1);
2690 +       }
2691 +
2692 +       return a;
2693 +}
2694 +
2695 +
2696 +/*
2697 + * Checks to see if crypto is already enabled.  If crypto isn't enabled,
2698 + * "hifn_enable_crypto" is called to enable it.  The check is important,
2699 + * as enabling crypto twice will lock the board.
2700 + */
2701 +static int 
2702 +hifn_enable_crypto(struct hifn_softc *sc)
2703 +{
2704 +       u_int32_t dmacfg, ramcfg, encl, addr, i;
2705 +       char offtbl[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2706 +                                         0x00, 0x00, 0x00, 0x00 };
2707 +
2708 +       DPRINTF("%s()\n", __FUNCTION__);
2709 +
2710 +       ramcfg = READ_REG_0(sc, HIFN_0_PUCNFG);
2711 +       dmacfg = READ_REG_1(sc, HIFN_1_DMA_CNFG);
2712 +
2713 +       /*
2714 +        * The RAM config register's encrypt level bit needs to be set before
2715 +        * every read performed on the encryption level register.
2716 +        */
2717 +       WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
2718 +
2719 +       encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
2720 +
2721 +       /*
2722 +        * Make sure we don't re-unlock.  Two unlocks kill the chip until the
2723 +        * next reboot.
2724 +        */
2725 +       if (encl == HIFN_PUSTAT_ENA_1 || encl == HIFN_PUSTAT_ENA_2) {
2726 +#ifdef HIFN_DEBUG
2727 +               if (hifn_debug)
2728 +                       device_printf(sc->sc_dev,
2729 +                           "Strong crypto already enabled!\n");
2730 +#endif
2731 +               goto report;
2732 +       }
2733 +
2734 +       if (encl != 0 && encl != HIFN_PUSTAT_ENA_0) {
2735 +#ifdef HIFN_DEBUG
2736 +               if (hifn_debug)
2737 +                       device_printf(sc->sc_dev,
2738 +                             "Unknown encryption level 0x%x\n", encl);
2739 +#endif
2740 +               return 1;
2741 +       }
2742 +
2743 +       WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_UNLOCK |
2744 +           HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
2745 +       DELAY(1000);
2746 +       addr = READ_REG_1(sc, HIFN_UNLOCK_SECRET1);
2747 +       DELAY(1000);
2748 +       WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, 0);
2749 +       DELAY(1000);
2750 +
2751 +       for (i = 0; i <= 12; i++) {
2752 +               addr = hifn_next_signature(addr, offtbl[i] + 0x101);
2753 +               WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, addr);
2754 +
2755 +               DELAY(1000);
2756 +       }
2757 +
2758 +       WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
2759 +       encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
2760 +
2761 +#ifdef HIFN_DEBUG
2762 +       if (hifn_debug) {
2763 +               if (encl != HIFN_PUSTAT_ENA_1 && encl != HIFN_PUSTAT_ENA_2)
2764 +                       device_printf(sc->sc_dev, "Engine is permanently "
2765 +                               "locked until next system reset!\n");
2766 +               else
2767 +                       device_printf(sc->sc_dev, "Engine enabled "
2768 +                               "successfully!\n");
2769 +       }
2770 +#endif
2771 +
2772 +report:
2773 +       WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg);
2774 +       WRITE_REG_1(sc, HIFN_1_DMA_CNFG, dmacfg);
2775 +
2776 +       switch (encl) {
2777 +       case HIFN_PUSTAT_ENA_1:
2778 +       case HIFN_PUSTAT_ENA_2:
2779 +               break;
2780 +       case HIFN_PUSTAT_ENA_0:
2781 +       default:
2782 +               device_printf(sc->sc_dev, "disabled\n");
2783 +               break;
2784 +       }
2785 +
2786 +       return 0;
2787 +}
2788 +
2789 +/*
2790 + * Give initial values to the registers listed in the "Register Space"
2791 + * section of the HIFN Software Development reference manual.
2792 + */
2793 +static void 
2794 +hifn_init_pci_registers(struct hifn_softc *sc)
2795 +{
2796 +       DPRINTF("%s()\n", __FUNCTION__);
2797 +
2798 +       /* write fixed values needed by the Initialization registers */
2799 +       WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
2800 +       WRITE_REG_0(sc, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD);
2801 +       WRITE_REG_0(sc, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);
2802 +
2803 +       /* write all 4 ring address registers */
2804 +       WRITE_REG_1(sc, HIFN_1_DMA_CRAR, sc->sc_dma_physaddr +
2805 +           offsetof(struct hifn_dma, cmdr[0]));
2806 +       WRITE_REG_1(sc, HIFN_1_DMA_SRAR, sc->sc_dma_physaddr +
2807 +           offsetof(struct hifn_dma, srcr[0]));
2808 +       WRITE_REG_1(sc, HIFN_1_DMA_DRAR, sc->sc_dma_physaddr +
2809 +           offsetof(struct hifn_dma, dstr[0]));
2810 +       WRITE_REG_1(sc, HIFN_1_DMA_RRAR, sc->sc_dma_physaddr +
2811 +           offsetof(struct hifn_dma, resr[0]));
2812 +
2813 +       DELAY(2000);
2814 +
2815 +       /* write status register */
2816 +       WRITE_REG_1(sc, HIFN_1_DMA_CSR,
2817 +           HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
2818 +           HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS |
2819 +           HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
2820 +           HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
2821 +           HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
2822 +           HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
2823 +           HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
2824 +           HIFN_DMACSR_S_WAIT |
2825 +           HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
2826 +           HIFN_DMACSR_C_WAIT |
2827 +           HIFN_DMACSR_ENGINE |
2828 +           ((sc->sc_flags & HIFN_HAS_PUBLIC) ?
2829 +               HIFN_DMACSR_PUBDONE : 0) |
2830 +           ((sc->sc_flags & HIFN_IS_7811) ?
2831 +               HIFN_DMACSR_ILLW | HIFN_DMACSR_ILLR : 0));
2832 +
2833 +       sc->sc_d_busy = sc->sc_r_busy = sc->sc_s_busy = sc->sc_c_busy = 0;
2834 +       sc->sc_dmaier |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT |
2835 +           HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER |
2836 +           HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT |
2837 +           ((sc->sc_flags & HIFN_IS_7811) ?
2838 +               HIFN_DMAIER_ILLW | HIFN_DMAIER_ILLR : 0);
2839 +       sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
2840 +       WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
2841 +
2842 +
2843 +       if (sc->sc_flags & HIFN_IS_7956) {
2844 +               u_int32_t pll;
2845 +
2846 +               WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
2847 +                   HIFN_PUCNFG_TCALLPHASES |
2848 +                   HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32);
2849 +
2850 +               /* turn off the clocks and ensure bypass is set */
2851 +               pll = READ_REG_1(sc, HIFN_1_PLL);
2852 +               pll = (pll &~ (HIFN_PLL_PK_CLK_SEL | HIFN_PLL_PE_CLK_SEL))
2853 +                 | HIFN_PLL_BP | HIFN_PLL_MBSET;
2854 +               WRITE_REG_1(sc, HIFN_1_PLL, pll);
2855 +               DELAY(10*1000);         /* 10ms */
2856 +
2857 +               /* change configuration */
2858 +               pll = (pll &~ HIFN_PLL_CONFIG) | sc->sc_pllconfig;
2859 +               WRITE_REG_1(sc, HIFN_1_PLL, pll);
2860 +               DELAY(10*1000);         /* 10ms */
2861 +
2862 +               /* disable bypass */
2863 +               pll &= ~HIFN_PLL_BP;
2864 +               WRITE_REG_1(sc, HIFN_1_PLL, pll);
2865 +               /* enable clocks with new configuration */
2866 +               pll |= HIFN_PLL_PK_CLK_SEL | HIFN_PLL_PE_CLK_SEL;
2867 +               WRITE_REG_1(sc, HIFN_1_PLL, pll);
2868 +       } else {
2869 +               WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
2870 +                   HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES |
2871 +                   HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 |
2872 +                   (sc->sc_drammodel ? HIFN_PUCNFG_DRAM : HIFN_PUCNFG_SRAM));
2873 +       }
2874 +
2875 +       WRITE_REG_0(sc, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
2876 +       WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
2877 +           HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST |
2878 +           ((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) |
2879 +           ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL));
2880 +}
2881 +
2882 +/*
2883 + * The maximum number of sessions supported by the card
2884 + * is dependent on the amount of context ram, which
2885 + * encryption algorithms are enabled, and how compression
2886 + * is configured.  This should be configured before this
2887 + * routine is called.
2888 + */
2889 +static void
2890 +hifn_sessions(struct hifn_softc *sc)
2891 +{
2892 +       u_int32_t pucnfg;
2893 +       int ctxsize;
2894 +
2895 +       DPRINTF("%s()\n", __FUNCTION__);
2896 +
2897 +       pucnfg = READ_REG_0(sc, HIFN_0_PUCNFG);
2898 +
2899 +       if (pucnfg & HIFN_PUCNFG_COMPSING) {
2900 +               if (pucnfg & HIFN_PUCNFG_ENCCNFG)
2901 +                       ctxsize = 128;
2902 +               else
2903 +                       ctxsize = 512;
2904 +               /*
2905 +                * 7955/7956 have internal context memory of 32K
2906 +                */
2907 +               if (sc->sc_flags & HIFN_IS_7956)
2908 +                       sc->sc_maxses = 32768 / ctxsize;
2909 +               else
2910 +                       sc->sc_maxses = 1 +
2911 +                           ((sc->sc_ramsize - 32768) / ctxsize);
2912 +       } else
2913 +               sc->sc_maxses = sc->sc_ramsize / 16384;
2914 +
2915 +       if (sc->sc_maxses > 2048)
2916 +               sc->sc_maxses = 2048;
2917 +}
2918 +
2919 +/*
2920 + * Determine ram type (sram or dram).  Board should be just out of a reset
2921 + * state when this is called.
2922 + */
2923 +static int
2924 +hifn_ramtype(struct hifn_softc *sc)
2925 +{
2926 +       u_int8_t data[8], dataexpect[8];
2927 +       int i;
2928 +
2929 +       for (i = 0; i < sizeof(data); i++)
2930 +               data[i] = dataexpect[i] = 0x55;
2931 +       if (hifn_writeramaddr(sc, 0, data))
2932 +               return (-1);
2933 +       if (hifn_readramaddr(sc, 0, data))
2934 +               return (-1);
2935 +       if (bcmp(data, dataexpect, sizeof(data)) != 0) {
2936 +               sc->sc_drammodel = 1;
2937 +               return (0);
2938 +       }
2939 +
2940 +       for (i = 0; i < sizeof(data); i++)
2941 +               data[i] = dataexpect[i] = 0xaa;
2942 +       if (hifn_writeramaddr(sc, 0, data))
2943 +               return (-1);
2944 +       if (hifn_readramaddr(sc, 0, data))
2945 +               return (-1);
2946 +       if (bcmp(data, dataexpect, sizeof(data)) != 0) {
2947 +               sc->sc_drammodel = 1;
2948 +               return (0);
2949 +       }
2950 +
2951 +       return (0);
2952 +}
2953 +
2954 +#define        HIFN_SRAM_MAX           (32 << 20)
2955 +#define        HIFN_SRAM_STEP_SIZE     16384
2956 +#define        HIFN_SRAM_GRANULARITY   (HIFN_SRAM_MAX / HIFN_SRAM_STEP_SIZE)
2957 +
2958 +static int
2959 +hifn_sramsize(struct hifn_softc *sc)
2960 +{
2961 +       u_int32_t a;
2962 +       u_int8_t data[8];
2963 +       u_int8_t dataexpect[sizeof(data)];
2964 +       int32_t i;
2965 +
2966 +       for (i = 0; i < sizeof(data); i++)
2967 +               data[i] = dataexpect[i] = i ^ 0x5a;
2968 +
2969 +       for (i = HIFN_SRAM_GRANULARITY - 1; i >= 0; i--) {
2970 +               a = i * HIFN_SRAM_STEP_SIZE;
2971 +               bcopy(&i, data, sizeof(i));
2972 +               hifn_writeramaddr(sc, a, data);
2973 +       }
2974 +
2975 +       for (i = 0; i < HIFN_SRAM_GRANULARITY; i++) {
2976 +               a = i * HIFN_SRAM_STEP_SIZE;
2977 +               bcopy(&i, dataexpect, sizeof(i));
2978 +               if (hifn_readramaddr(sc, a, data) < 0)
2979 +                       return (0);
2980 +               if (bcmp(data, dataexpect, sizeof(data)) != 0)
2981 +                       return (0);
2982 +               sc->sc_ramsize = a + HIFN_SRAM_STEP_SIZE;
2983 +       }
2984 +
2985 +       return (0);
2986 +}
2987 +
2988 +/*
2989 + * XXX For dram boards, one should really try all of the
2990 + * HIFN_PUCNFG_DSZ_*'s.  This just assumes that PUCNFG
2991 + * is already set up correctly.
2992 + */
2993 +static int
2994 +hifn_dramsize(struct hifn_softc *sc)
2995 +{
2996 +       u_int32_t cnfg;
2997 +
2998 +       if (sc->sc_flags & HIFN_IS_7956) {
2999 +               /*
3000 +                * 7955/7956 have a fixed internal ram of only 32K.
3001 +                */
3002 +               sc->sc_ramsize = 32768;
3003 +       } else {
3004 +               cnfg = READ_REG_0(sc, HIFN_0_PUCNFG) &
3005 +                   HIFN_PUCNFG_DRAMMASK;
3006 +               sc->sc_ramsize = 1 << ((cnfg >> 13) + 18);
3007 +       }
3008 +       return (0);
3009 +}
3010 +
3011 +static void
3012 +hifn_alloc_slot(struct hifn_softc *sc, int *cmdp, int *srcp, int *dstp, int *resp)
3013 +{
3014 +       struct hifn_dma *dma = sc->sc_dma;
3015 +
3016 +       DPRINTF("%s()\n", __FUNCTION__);
3017 +
3018 +       if (dma->cmdi == HIFN_D_CMD_RSIZE) {
3019 +               dma->cmdi = 0;
3020 +               dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
3021 +               wmb();
3022 +               dma->cmdr[HIFN_D_CMD_RSIZE].l |= htole32(HIFN_D_VALID);
3023 +               HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
3024 +                   BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3025 +       }
3026 +       *cmdp = dma->cmdi++;
3027 +       dma->cmdk = dma->cmdi;
3028 +
3029 +       if (dma->srci == HIFN_D_SRC_RSIZE) {
3030 +               dma->srci = 0;
3031 +               dma->srcr[HIFN_D_SRC_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
3032 +               wmb();
3033 +               dma->srcr[HIFN_D_SRC_RSIZE].l |= htole32(HIFN_D_VALID);
3034 +               HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
3035 +                   BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3036 +       }
3037 +       *srcp = dma->srci++;
3038 +       dma->srck = dma->srci;
3039 +
3040 +       if (dma->dsti == HIFN_D_DST_RSIZE) {
3041 +               dma->dsti = 0;
3042 +               dma->dstr[HIFN_D_DST_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
3043 +               wmb();
3044 +               dma->dstr[HIFN_D_DST_RSIZE].l |= htole32(HIFN_D_VALID);
3045 +               HIFN_DSTR_SYNC(sc, HIFN_D_DST_RSIZE,
3046 +                   BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3047 +       }
3048 +       *dstp = dma->dsti++;
3049 +       dma->dstk = dma->dsti;
3050 +
3051 +       if (dma->resi == HIFN_D_RES_RSIZE) {
3052 +               dma->resi = 0;
3053 +               dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
3054 +               wmb();
3055 +               dma->resr[HIFN_D_RES_RSIZE].l |= htole32(HIFN_D_VALID);
3056 +               HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
3057 +                   BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3058 +       }
3059 +       *resp = dma->resi++;
3060 +       dma->resk = dma->resi;
3061 +}
3062 +
3063 +static int
3064 +hifn_writeramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
3065 +{
3066 +       struct hifn_dma *dma = sc->sc_dma;
3067 +       hifn_base_command_t wc;
3068 +       const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
3069 +       int r, cmdi, resi, srci, dsti;
3070 +
3071 +       DPRINTF("%s()\n", __FUNCTION__);
3072 +
3073 +       wc.masks = htole16(3 << 13);
3074 +       wc.session_num = htole16(addr >> 14);
3075 +       wc.total_source_count = htole16(8);
3076 +       wc.total_dest_count = htole16(addr & 0x3fff);
3077 +
3078 +       hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
3079 +
3080 +       WRITE_REG_1(sc, HIFN_1_DMA_CSR,
3081 +           HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
3082 +           HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
3083 +
3084 +       /* build write command */
3085 +       bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
3086 +       *(hifn_base_command_t *)dma->command_bufs[cmdi] = wc;
3087 +       bcopy(data, &dma->test_src, sizeof(dma->test_src));
3088 +
3089 +       dma->srcr[srci].p = htole32(sc->sc_dma_physaddr
3090 +           + offsetof(struct hifn_dma, test_src));
3091 +       dma->dstr[dsti].p = htole32(sc->sc_dma_physaddr
3092 +           + offsetof(struct hifn_dma, test_dst));
3093 +
3094 +       dma->cmdr[cmdi].l = htole32(16 | masks);
3095 +       dma->srcr[srci].l = htole32(8 | masks);
3096 +       dma->dstr[dsti].l = htole32(4 | masks);
3097 +       dma->resr[resi].l = htole32(4 | masks);
3098 +
3099 +       for (r = 10000; r >= 0; r--) {
3100 +               DELAY(10);
3101 +               if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
3102 +                       break;
3103 +       }
3104 +       if (r == 0) {
3105 +               device_printf(sc->sc_dev, "writeramaddr -- "
3106 +                   "result[%d](addr %d) still valid\n", resi, addr);
3107 +               r = -1;
3108 +               return (-1);
3109 +       } else
3110 +               r = 0;
3111 +
3112 +       WRITE_REG_1(sc, HIFN_1_DMA_CSR,
3113 +           HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
3114 +           HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
3115 +
3116 +       return (r);
3117 +}
3118 +
3119 +static int
3120 +hifn_readramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
3121 +{
3122 +       struct hifn_dma *dma = sc->sc_dma;
3123 +       hifn_base_command_t rc;
3124 +       const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
3125 +       int r, cmdi, srci, dsti, resi;
3126 +
3127 +       DPRINTF("%s()\n", __FUNCTION__);
3128 +
3129 +       rc.masks = htole16(2 << 13);
3130 +       rc.session_num = htole16(addr >> 14);
3131 +       rc.total_source_count = htole16(addr & 0x3fff);
3132 +       rc.total_dest_count = htole16(8);
3133 +
3134 +       hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
3135 +
3136 +       WRITE_REG_1(sc, HIFN_1_DMA_CSR,
3137 +           HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
3138 +           HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
3139 +
3140 +       bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
3141 +       *(hifn_base_command_t *)dma->command_bufs[cmdi] = rc;
3142 +
3143 +       dma->srcr[srci].p = htole32(sc->sc_dma_physaddr +
3144 +           offsetof(struct hifn_dma, test_src));
3145 +       dma->test_src = 0;
3146 +       dma->dstr[dsti].p =  htole32(sc->sc_dma_physaddr +
3147 +           offsetof(struct hifn_dma, test_dst));
3148 +       dma->test_dst = 0;
3149 +       dma->cmdr[cmdi].l = htole32(8 | masks);
3150 +       dma->srcr[srci].l = htole32(8 | masks);
3151 +       dma->dstr[dsti].l = htole32(8 | masks);
3152 +       dma->resr[resi].l = htole32(HIFN_MAX_RESULT | masks);
3153 +
3154 +       for (r = 10000; r >= 0; r--) {
3155 +               DELAY(10);
3156 +               if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
3157 +                       break;
3158 +       }
3159 +       if (r == 0) {
3160 +               device_printf(sc->sc_dev, "readramaddr -- "
3161 +                   "result[%d](addr %d) still valid\n", resi, addr);
3162 +               r = -1;
3163 +       } else {
3164 +               r = 0;
3165 +               bcopy(&dma->test_dst, data, sizeof(dma->test_dst));
3166 +       }
3167 +
3168 +       WRITE_REG_1(sc, HIFN_1_DMA_CSR,
3169 +           HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
3170 +           HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
3171 +
3172 +       return (r);
3173 +}
3174 +
3175 +/*
3176 + * Initialize the descriptor rings.
3177 + */
3178 +static void 
3179 +hifn_init_dma(struct hifn_softc *sc)
3180 +{
3181 +       struct hifn_dma *dma = sc->sc_dma;
3182 +       int i;
3183 +
3184 +       DPRINTF("%s()\n", __FUNCTION__);
3185 +
3186 +       hifn_set_retry(sc);
3187 +
3188 +       /* initialize static pointer values */
3189 +       for (i = 0; i < HIFN_D_CMD_RSIZE; i++)
3190 +               dma->cmdr[i].p = htole32(sc->sc_dma_physaddr +
3191 +                   offsetof(struct hifn_dma, command_bufs[i][0]));
3192 +       for (i = 0; i < HIFN_D_RES_RSIZE; i++)
3193 +               dma->resr[i].p = htole32(sc->sc_dma_physaddr +
3194 +                   offsetof(struct hifn_dma, result_bufs[i][0]));
3195 +
3196 +       dma->cmdr[HIFN_D_CMD_RSIZE].p =
3197 +           htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, cmdr[0]));
3198 +       dma->srcr[HIFN_D_SRC_RSIZE].p =
3199 +           htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, srcr[0]));
3200 +       dma->dstr[HIFN_D_DST_RSIZE].p =
3201 +           htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, dstr[0]));
3202 +       dma->resr[HIFN_D_RES_RSIZE].p =
3203 +           htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, resr[0]));
3204 +
3205 +       dma->cmdu = dma->srcu = dma->dstu = dma->resu = 0;
3206 +       dma->cmdi = dma->srci = dma->dsti = dma->resi = 0;
3207 +       dma->cmdk = dma->srck = dma->dstk = dma->resk = 0;
3208 +}
3209 +
3210 +/*
3211 + * Writes out the raw command buffer space.  Returns the
3212 + * command buffer size.
3213 + */
3214 +static u_int
3215 +hifn_write_command(struct hifn_command *cmd, u_int8_t *buf)
3216 +{
3217 +       struct hifn_softc *sc = NULL;
3218 +       u_int8_t *buf_pos;
3219 +       hifn_base_command_t *base_cmd;
3220 +       hifn_mac_command_t *mac_cmd;
3221 +       hifn_crypt_command_t *cry_cmd;
3222 +       int using_mac, using_crypt, len, ivlen;
3223 +       u_int32_t dlen, slen;
3224 +
3225 +       DPRINTF("%s()\n", __FUNCTION__);
3226 +
3227 +       buf_pos = buf;
3228 +       using_mac = cmd->base_masks & HIFN_BASE_CMD_MAC;
3229 +       using_crypt = cmd->base_masks & HIFN_BASE_CMD_CRYPT;
3230 +
3231 +       base_cmd = (hifn_base_command_t *)buf_pos;
3232 +       base_cmd->masks = htole16(cmd->base_masks);
3233 +       slen = cmd->src_mapsize;
3234 +       if (cmd->sloplen)
3235 +               dlen = cmd->dst_mapsize - cmd->sloplen + sizeof(u_int32_t);
3236 +       else
3237 +               dlen = cmd->dst_mapsize;
3238 +       base_cmd->total_source_count = htole16(slen & HIFN_BASE_CMD_LENMASK_LO);
3239 +       base_cmd->total_dest_count = htole16(dlen & HIFN_BASE_CMD_LENMASK_LO);
3240 +       dlen >>= 16;
3241 +       slen >>= 16;
3242 +       base_cmd->session_num = htole16(
3243 +           ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) |
3244 +           ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M));
3245 +       buf_pos += sizeof(hifn_base_command_t);
3246 +
3247 +       if (using_mac) {
3248 +               mac_cmd = (hifn_mac_command_t *)buf_pos;
3249 +               dlen = cmd->maccrd->crd_len;
3250 +               mac_cmd->source_count = htole16(dlen & 0xffff);
3251 +               dlen >>= 16;
3252 +               mac_cmd->masks = htole16(cmd->mac_masks |
3253 +                   ((dlen << HIFN_MAC_CMD_SRCLEN_S) & HIFN_MAC_CMD_SRCLEN_M));
3254 +               mac_cmd->header_skip = htole16(cmd->maccrd->crd_skip);
3255 +               mac_cmd->reserved = 0;
3256 +               buf_pos += sizeof(hifn_mac_command_t);
3257 +       }
3258 +
3259 +       if (using_crypt) {
3260 +               cry_cmd = (hifn_crypt_command_t *)buf_pos;
3261 +               dlen = cmd->enccrd->crd_len;
3262 +               cry_cmd->source_count = htole16(dlen & 0xffff);
3263 +               dlen >>= 16;
3264 +               cry_cmd->masks = htole16(cmd->cry_masks |
3265 +                   ((dlen << HIFN_CRYPT_CMD_SRCLEN_S) & HIFN_CRYPT_CMD_SRCLEN_M));
3266 +               cry_cmd->header_skip = htole16(cmd->enccrd->crd_skip);
3267 +               cry_cmd->reserved = 0;
3268 +               buf_pos += sizeof(hifn_crypt_command_t);
3269 +       }
3270 +
3271 +       if (using_mac && cmd->mac_masks & HIFN_MAC_CMD_NEW_KEY) {
3272 +               bcopy(cmd->mac, buf_pos, HIFN_MAC_KEY_LENGTH);
3273 +               buf_pos += HIFN_MAC_KEY_LENGTH;
3274 +       }
3275 +
3276 +       if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_KEY) {
3277 +               switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
3278 +               case HIFN_CRYPT_CMD_ALG_3DES:
3279 +                       bcopy(cmd->ck, buf_pos, HIFN_3DES_KEY_LENGTH);
3280 +                       buf_pos += HIFN_3DES_KEY_LENGTH;
3281 +                       break;
3282 +               case HIFN_CRYPT_CMD_ALG_DES:
3283 +                       bcopy(cmd->ck, buf_pos, HIFN_DES_KEY_LENGTH);
3284 +                       buf_pos += HIFN_DES_KEY_LENGTH;
3285 +                       break;
3286 +               case HIFN_CRYPT_CMD_ALG_RC4:
3287 +                       len = 256;
3288 +                       do {
3289 +                               int clen;
3290 +
3291 +                               clen = MIN(cmd->cklen, len);
3292 +                               bcopy(cmd->ck, buf_pos, clen);
3293 +                               len -= clen;
3294 +                               buf_pos += clen;
3295 +                       } while (len > 0);
3296 +                       bzero(buf_pos, 4);
3297 +                       buf_pos += 4;
3298 +                       break;
3299 +               case HIFN_CRYPT_CMD_ALG_AES:
3300 +                       /*
3301 +                        * AES keys are variable 128, 192 and
3302 +                        * 256 bits (16, 24 and 32 bytes).
3303 +                        */
3304 +                       bcopy(cmd->ck, buf_pos, cmd->cklen);
3305 +                       buf_pos += cmd->cklen;
3306 +                       break;
3307 +               }
3308 +       }
3309 +
3310 +       if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_IV) {
3311 +               switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
3312 +               case HIFN_CRYPT_CMD_ALG_AES:
3313 +                       ivlen = HIFN_AES_IV_LENGTH;
3314 +                       break;
3315 +               default:
3316 +                       ivlen = HIFN_IV_LENGTH;
3317 +                       break;
3318 +               }
3319 +               bcopy(cmd->iv, buf_pos, ivlen);
3320 +               buf_pos += ivlen;
3321 +       }
3322 +
3323 +       if ((cmd->base_masks & (HIFN_BASE_CMD_MAC|HIFN_BASE_CMD_CRYPT)) == 0) {
3324 +               bzero(buf_pos, 8);
3325 +               buf_pos += 8;
3326 +       }
3327 +
3328 +       return (buf_pos - buf);
3329 +}
3330 +
3331 +static int
3332 +hifn_dmamap_aligned(struct hifn_operand *op)
3333 +{
3334 +       struct hifn_softc *sc = NULL;
3335 +       int i;
3336 +
3337 +       DPRINTF("%s()\n", __FUNCTION__);
3338 +
3339 +       for (i = 0; i < op->nsegs; i++) {
3340 +               if (op->segs[i].ds_addr & 3)
3341 +                       return (0);
3342 +               if ((i != (op->nsegs - 1)) && (op->segs[i].ds_len & 3))
3343 +                       return (0);
3344 +       }
3345 +       return (1);
3346 +}
3347 +
3348 +static __inline int
3349 +hifn_dmamap_dstwrap(struct hifn_softc *sc, int idx)
3350 +{
3351 +       struct hifn_dma *dma = sc->sc_dma;
3352 +
3353 +       if (++idx == HIFN_D_DST_RSIZE) {
3354 +               dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_JUMP |
3355 +                   HIFN_D_MASKDONEIRQ);
3356 +               HIFN_DSTR_SYNC(sc, idx,
3357 +                   BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3358 +               idx = 0;
3359 +       }
3360 +       return (idx);
3361 +}
3362 +
3363 +static int
3364 +hifn_dmamap_load_dst(struct hifn_softc *sc, struct hifn_command *cmd)
3365 +{
3366 +       struct hifn_dma *dma = sc->sc_dma;
3367 +       struct hifn_operand *dst = &cmd->dst;
3368 +       u_int32_t p, l;
3369 +       int idx, used = 0, i;
3370 +
3371 +       DPRINTF("%s()\n", __FUNCTION__);
3372 +
3373 +       idx = dma->dsti;
3374 +       for (i = 0; i < dst->nsegs - 1; i++) {
3375 +               dma->dstr[idx].p = htole32(dst->segs[i].ds_addr);
3376 +               dma->dstr[idx].l = htole32(HIFN_D_MASKDONEIRQ | dst->segs[i].ds_len);
3377 +               wmb();
3378 +               dma->dstr[idx].l |= htole32(HIFN_D_VALID);
3379 +               HIFN_DSTR_SYNC(sc, idx,
3380 +                   BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3381 +               used++;
3382 +
3383 +               idx = hifn_dmamap_dstwrap(sc, idx);
3384 +       }
3385 +
3386 +       if (cmd->sloplen == 0) {
3387 +               p = dst->segs[i].ds_addr;
3388 +               l = HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
3389 +                   dst->segs[i].ds_len;
3390 +       } else {
3391 +               p = sc->sc_dma_physaddr +
3392 +                   offsetof(struct hifn_dma, slop[cmd->slopidx]);
3393 +               l = HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
3394 +                   sizeof(u_int32_t);
3395 +
3396 +               if ((dst->segs[i].ds_len - cmd->sloplen) != 0) {
3397 +                       dma->dstr[idx].p = htole32(dst->segs[i].ds_addr);
3398 +                       dma->dstr[idx].l = htole32(HIFN_D_MASKDONEIRQ |
3399 +                           (dst->segs[i].ds_len - cmd->sloplen));
3400 +                       wmb();
3401 +                       dma->dstr[idx].l |= htole32(HIFN_D_VALID);
3402 +                       HIFN_DSTR_SYNC(sc, idx,
3403 +                           BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3404 +                       used++;
3405 +
3406 +                       idx = hifn_dmamap_dstwrap(sc, idx);
3407 +               }
3408 +       }
3409 +       dma->dstr[idx].p = htole32(p);
3410 +       dma->dstr[idx].l = htole32(l);
3411 +       wmb();
3412 +       dma->dstr[idx].l |= htole32(HIFN_D_VALID);
3413 +       HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3414 +       used++;
3415 +
3416 +       idx = hifn_dmamap_dstwrap(sc, idx);
3417 +
3418 +       dma->dsti = idx;
3419 +       dma->dstu += used;
3420 +       return (idx);
3421 +}
3422 +
3423 +static __inline int
3424 +hifn_dmamap_srcwrap(struct hifn_softc *sc, int idx)
3425 +{
3426 +       struct hifn_dma *dma = sc->sc_dma;
3427 +
3428 +       if (++idx == HIFN_D_SRC_RSIZE) {
3429 +               dma->srcr[idx].l = htole32(HIFN_D_VALID |
3430 +                   HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
3431 +               HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
3432 +                   BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3433 +               idx = 0;
3434 +       }
3435 +       return (idx);
3436 +}
3437 +
3438 +static int
3439 +hifn_dmamap_load_src(struct hifn_softc *sc, struct hifn_command *cmd)
3440 +{
3441 +       struct hifn_dma *dma = sc->sc_dma;
3442 +       struct hifn_operand *src = &cmd->src;
3443 +       int idx, i;
3444 +       u_int32_t last = 0;
3445 +
3446 +       DPRINTF("%s()\n", __FUNCTION__);
3447 +
3448 +       idx = dma->srci;
3449 +       for (i = 0; i < src->nsegs; i++) {
3450 +               if (i == src->nsegs - 1)
3451 +                       last = HIFN_D_LAST;
3452 +
3453 +               dma->srcr[idx].p = htole32(src->segs[i].ds_addr);
3454 +               dma->srcr[idx].l = htole32(src->segs[i].ds_len |
3455 +                   HIFN_D_MASKDONEIRQ | last);
3456 +               wmb();
3457 +               dma->srcr[idx].l |= htole32(HIFN_D_VALID);
3458 +               HIFN_SRCR_SYNC(sc, idx,
3459 +                   BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3460 +
3461 +               idx = hifn_dmamap_srcwrap(sc, idx);
3462 +       }
3463 +       dma->srci = idx;
3464 +       dma->srcu += src->nsegs;
3465 +       return (idx);
3466 +} 
3467 +
3468 +
3469 +static int 
3470 +hifn_crypto(
3471 +       struct hifn_softc *sc,
3472 +       struct hifn_command *cmd,
3473 +       struct cryptop *crp,
3474 +       int hint)
3475 +{
3476 +       struct  hifn_dma *dma = sc->sc_dma;
3477 +       u_int32_t cmdlen, csr;
3478 +       int cmdi, resi, err = 0;
3479 +       unsigned long l_flags;
3480 +
3481 +       DPRINTF("%s()\n", __FUNCTION__);
3482 +
3483 +       /*
3484 +        * need 1 cmd, and 1 res
3485 +        *
3486 +        * NB: check this first since it's easy.
3487 +        */
3488 +       HIFN_LOCK(sc);
3489 +       if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE ||
3490 +           (dma->resu + 1) > HIFN_D_RES_RSIZE) {
3491 +#ifdef HIFN_DEBUG
3492 +               if (hifn_debug) {
3493 +                       device_printf(sc->sc_dev,
3494 +                               "cmd/result exhaustion, cmdu %u resu %u\n",
3495 +                               dma->cmdu, dma->resu);
3496 +               }
3497 +#endif
3498 +               hifnstats.hst_nomem_cr++;
3499 +               sc->sc_needwakeup |= CRYPTO_SYMQ;
3500 +               HIFN_UNLOCK(sc);
3501 +               return (ERESTART);
3502 +       }
3503 +
3504 +       if (crp->crp_flags & CRYPTO_F_SKBUF) {
3505 +               if (pci_map_skb(sc, &cmd->src, cmd->src_skb)) {
3506 +                       hifnstats.hst_nomem_load++;
3507 +                       err = ENOMEM;
3508 +                       goto err_srcmap1;
3509 +               }
3510 +       } else if (crp->crp_flags & CRYPTO_F_IOV) {
3511 +               if (pci_map_uio(sc, &cmd->src, cmd->src_io)) {
3512 +                       hifnstats.hst_nomem_load++;
3513 +                       err = ENOMEM;
3514 +                       goto err_srcmap1;
3515 +               }
3516 +       } else {
3517 +               if (pci_map_buf(sc, &cmd->src, cmd->src_buf, crp->crp_ilen)) {
3518 +                       hifnstats.hst_nomem_load++;
3519 +                       err = ENOMEM;
3520 +                       goto err_srcmap1;
3521 +               }
3522 +       }
3523 +
3524 +       if (hifn_dmamap_aligned(&cmd->src)) {
3525 +               cmd->sloplen = cmd->src_mapsize & 3;
3526 +               cmd->dst = cmd->src;
3527 +       } else {
3528 +               if (crp->crp_flags & CRYPTO_F_IOV) {
3529 +                       DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
3530 +                       err = EINVAL;
3531 +                       goto err_srcmap;
3532 +               } else if (crp->crp_flags & CRYPTO_F_SKBUF) {
3533 +#ifdef NOTYET
3534 +                       int totlen, len;
3535 +                       struct mbuf *m, *m0, *mlast;
3536 +
3537 +                       KASSERT(cmd->dst_m == cmd->src_m,
3538 +                               ("hifn_crypto: dst_m initialized improperly"));
3539 +                       hifnstats.hst_unaligned++;
3540 +                       /*
3541 +                        * Source is not aligned on a longword boundary.
3542 +                        * Copy the data to ensure alignment.  If we fail
3543 +                        * to allocate mbufs or clusters while doing this
3544 +                        * we return ERESTART so the operation is requeued
3545 +                        * at the crypto layer, but only if there are
3546 +                        * ops already posted to the hardware; otherwise we
3547 +                        * have no guarantee that we'll be re-entered.
3548 +                        */
3549 +                       totlen = cmd->src_mapsize;
3550 +                       if (cmd->src_m->m_flags & M_PKTHDR) {
3551 +                               len = MHLEN;
3552 +                               MGETHDR(m0, M_DONTWAIT, MT_DATA);
3553 +                               if (m0 && !m_dup_pkthdr(m0, cmd->src_m, M_DONTWAIT)) {
3554 +                                       m_free(m0);
3555 +                                       m0 = NULL;
3556 +                               }
3557 +                       } else {
3558 +                               len = MLEN;
3559 +                               MGET(m0, M_DONTWAIT, MT_DATA);
3560 +                       }
3561 +                       if (m0 == NULL) {
3562 +                               hifnstats.hst_nomem_mbuf++;
3563 +                               err = dma->cmdu ? ERESTART : ENOMEM;
3564 +                               goto err_srcmap;
3565 +                       }
3566 +                       if (totlen >= MINCLSIZE) {
3567 +                               MCLGET(m0, M_DONTWAIT);
3568 +                               if ((m0->m_flags & M_EXT) == 0) {
3569 +                                       hifnstats.hst_nomem_mcl++;
3570 +                                       err = dma->cmdu ? ERESTART : ENOMEM;
3571 +                                       m_freem(m0);
3572 +                                       goto err_srcmap;
3573 +                               }
3574 +                               len = MCLBYTES;
3575 +                       }
3576 +                       totlen -= len;
3577 +                       m0->m_pkthdr.len = m0->m_len = len;
3578 +                       mlast = m0;
3579 +
3580 +                       while (totlen > 0) {
3581 +                               MGET(m, M_DONTWAIT, MT_DATA);
3582 +                               if (m == NULL) {
3583 +                                       hifnstats.hst_nomem_mbuf++;
3584 +                                       err = dma->cmdu ? ERESTART : ENOMEM;
3585 +                                       m_freem(m0);
3586 +                                       goto err_srcmap;
3587 +                               }
3588 +                               len = MLEN;
3589 +                               if (totlen >= MINCLSIZE) {
3590 +                                       MCLGET(m, M_DONTWAIT);
3591 +                                       if ((m->m_flags & M_EXT) == 0) {
3592 +                                               hifnstats.hst_nomem_mcl++;
3593 +                                               err = dma->cmdu ? ERESTART : ENOMEM;
3594 +                                               mlast->m_next = m;
3595 +                                               m_freem(m0);
3596 +                                               goto err_srcmap;
3597 +                                       }
3598 +                                       len = MCLBYTES;
3599 +                               }
3600 +
3601 +                               m->m_len = len;
3602 +                               m0->m_pkthdr.len += len;
3603 +                               totlen -= len;
3604 +
3605 +                               mlast->m_next = m;
3606 +                               mlast = m;
3607 +                       }
3608 +                       cmd->dst_m = m0;
3609 +#else
3610 +                       device_printf(sc->sc_dev,
3611 +                                       "%s,%d: CRYPTO_F_SKBUF unaligned not implemented\n",
3612 +                                       __FILE__, __LINE__);
3613 +                       err = EINVAL;
3614 +                       goto err_srcmap;
3615 +#endif
3616 +               } else {
3617 +                       device_printf(sc->sc_dev,
3618 +                                       "%s,%d: unaligned contig buffers not implemented\n",
3619 +                                       __FILE__, __LINE__);
3620 +                       err = EINVAL;
3621 +                       goto err_srcmap;
3622 +               }
3623 +       }
3624 +
3625 +       if (cmd->dst_map == NULL) {
3626 +               if (crp->crp_flags & CRYPTO_F_SKBUF) {
3627 +                       if (pci_map_skb(sc, &cmd->dst, cmd->dst_skb)) {
3628 +                               hifnstats.hst_nomem_map++;
3629 +                               err = ENOMEM;
3630 +                               goto err_dstmap1;
3631 +                       }
3632 +               } else if (crp->crp_flags & CRYPTO_F_IOV) {
3633 +                       if (pci_map_uio(sc, &cmd->dst, cmd->dst_io)) {
3634 +                               hifnstats.hst_nomem_load++;
3635 +                               err = ENOMEM;
3636 +                               goto err_dstmap1;
3637 +                       }
3638 +               } else {
3639 +                       if (pci_map_buf(sc, &cmd->dst, cmd->dst_buf, crp->crp_ilen)) {
3640 +                               hifnstats.hst_nomem_load++;
3641 +                               err = ENOMEM;
3642 +                               goto err_dstmap1;
3643 +                       }
3644 +               }
3645 +       }
3646 +
3647 +#ifdef HIFN_DEBUG
3648 +       if (hifn_debug) {
3649 +               device_printf(sc->sc_dev,
3650 +                   "Entering cmd: stat %8x ien %8x u %d/%d/%d/%d n %d/%d\n",
3651 +                   READ_REG_1(sc, HIFN_1_DMA_CSR),
3652 +                   READ_REG_1(sc, HIFN_1_DMA_IER),
3653 +                   dma->cmdu, dma->srcu, dma->dstu, dma->resu,
3654 +                   cmd->src_nsegs, cmd->dst_nsegs);
3655 +       }
3656 +#endif
3657 +
3658 +#if 0
3659 +       if (cmd->src_map == cmd->dst_map) {
3660 +               bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
3661 +                   BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
3662 +       } else {
3663 +               bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
3664 +                   BUS_DMASYNC_PREWRITE);
3665 +               bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
3666 +                   BUS_DMASYNC_PREREAD);
3667 +       }
3668 +#endif
3669 +
3670 +       /*
3671 +        * need N src, and N dst
3672 +        */
3673 +       if ((dma->srcu + cmd->src_nsegs) > HIFN_D_SRC_RSIZE ||
3674 +           (dma->dstu + cmd->dst_nsegs + 1) > HIFN_D_DST_RSIZE) {
3675 +#ifdef HIFN_DEBUG
3676 +               if (hifn_debug) {
3677 +                       device_printf(sc->sc_dev,
3678 +                               "src/dst exhaustion, srcu %u+%u dstu %u+%u\n",
3679 +                               dma->srcu, cmd->src_nsegs,
3680 +                               dma->dstu, cmd->dst_nsegs);
3681 +               }
3682 +#endif
3683 +               hifnstats.hst_nomem_sd++;
3684 +               err = ERESTART;
3685 +               goto err_dstmap;
3686 +       }
3687 +
3688 +       if (dma->cmdi == HIFN_D_CMD_RSIZE) {
3689 +               dma->cmdi = 0;
3690 +               dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
3691 +               wmb();
3692 +               dma->cmdr[HIFN_D_CMD_RSIZE].l |= htole32(HIFN_D_VALID);
3693 +               HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
3694 +                   BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3695 +       }
3696 +       cmdi = dma->cmdi++;
3697 +       cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
3698 +       HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);
3699 +
3700 +       /* .p for command/result already set */
3701 +       dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_LAST |
3702 +           HIFN_D_MASKDONEIRQ);
3703 +       wmb();
3704 +       dma->cmdr[cmdi].l |= htole32(HIFN_D_VALID);
3705 +       HIFN_CMDR_SYNC(sc, cmdi,
3706 +           BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3707 +       dma->cmdu++;
3708 +
3709 +       /*
3710 +        * We don't worry about missing an interrupt (which a "command wait"
3711 +        * interrupt salvages us from), unless there is more than one command
3712 +        * in the queue.
3713 +        */
3714 +       if (dma->cmdu > 1) {
3715 +               sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
3716 +               WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
3717 +       }
3718 +
3719 +       hifnstats.hst_ipackets++;
3720 +       hifnstats.hst_ibytes += cmd->src_mapsize;
3721 +
3722 +       hifn_dmamap_load_src(sc, cmd);
3723 +
3724 +       /*
3725 +        * Unlike other descriptors, we don't mask done interrupt from
3726 +        * result descriptor.
3727 +        */
3728 +#ifdef HIFN_DEBUG
3729 +       if (hifn_debug)
3730 +               device_printf(sc->sc_dev, "load res\n");
3731 +#endif
3732 +       if (dma->resi == HIFN_D_RES_RSIZE) {
3733 +               dma->resi = 0;
3734 +               dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
3735 +               wmb();
3736 +               dma->resr[HIFN_D_RES_RSIZE].l |= htole32(HIFN_D_VALID);
3737 +               HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
3738 +                   BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3739 +       }
3740 +       resi = dma->resi++;
3741 +       KASSERT(dma->hifn_commands[resi] == NULL,
3742 +               ("hifn_crypto: command slot %u busy", resi));
3743 +       dma->hifn_commands[resi] = cmd;
3744 +       HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
3745 +       if ((hint & CRYPTO_HINT_MORE) && sc->sc_curbatch < hifn_maxbatch) {
3746 +               dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
3747 +                   HIFN_D_LAST | HIFN_D_MASKDONEIRQ);
3748 +               wmb();
3749 +               dma->resr[resi].l |= htole32(HIFN_D_VALID);
3750 +               sc->sc_curbatch++;
3751 +               if (sc->sc_curbatch > hifnstats.hst_maxbatch)
3752 +                       hifnstats.hst_maxbatch = sc->sc_curbatch;
3753 +               hifnstats.hst_totbatch++;
3754 +       } else {
3755 +               dma->resr[resi].l = htole32(HIFN_MAX_RESULT | HIFN_D_LAST);
3756 +               wmb();
3757 +               dma->resr[resi].l |= htole32(HIFN_D_VALID);
3758 +               sc->sc_curbatch = 0;
3759 +       }
3760 +       HIFN_RESR_SYNC(sc, resi,
3761 +           BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3762 +       dma->resu++;
3763 +
3764 +       if (cmd->sloplen)
3765 +               cmd->slopidx = resi;
3766 +
3767 +       hifn_dmamap_load_dst(sc, cmd);
3768 +
3769 +       csr = 0;
3770 +       if (sc->sc_c_busy == 0) {
3771 +               csr |= HIFN_DMACSR_C_CTRL_ENA;
3772 +               sc->sc_c_busy = 1;
3773 +       }
3774 +       if (sc->sc_s_busy == 0) {
3775 +               csr |= HIFN_DMACSR_S_CTRL_ENA;
3776 +               sc->sc_s_busy = 1;
3777 +       }
3778 +       if (sc->sc_r_busy == 0) {
3779 +               csr |= HIFN_DMACSR_R_CTRL_ENA;
3780 +               sc->sc_r_busy = 1;
3781 +       }
3782 +       if (sc->sc_d_busy == 0) {
3783 +               csr |= HIFN_DMACSR_D_CTRL_ENA;
3784 +               sc->sc_d_busy = 1;
3785 +       }
3786 +       if (csr)
3787 +               WRITE_REG_1(sc, HIFN_1_DMA_CSR, csr);
3788 +
3789 +#ifdef HIFN_DEBUG
3790 +       if (hifn_debug) {
3791 +               device_printf(sc->sc_dev, "command: stat %8x ier %8x\n",
3792 +                   READ_REG_1(sc, HIFN_1_DMA_CSR),
3793 +                   READ_REG_1(sc, HIFN_1_DMA_IER));
3794 +       }
3795 +#endif
3796 +
3797 +       sc->sc_active = 5;
3798 +       HIFN_UNLOCK(sc);
3799 +       KASSERT(err == 0, ("hifn_crypto: success with error %u", err));
3800 +       return (err);           /* success */
3801 +
3802 +err_dstmap:
3803 +       if (cmd->src_map != cmd->dst_map)
3804 +               pci_unmap_buf(sc, &cmd->dst);
3805 +err_dstmap1:
3806 +err_srcmap:
3807 +       if (crp->crp_flags & CRYPTO_F_SKBUF) {
3808 +               if (cmd->src_skb != cmd->dst_skb)
3809 +#ifdef NOTYET
3810 +                       m_freem(cmd->dst_m);
3811 +#else
3812 +                       device_printf(sc->sc_dev,
3813 +                                       "%s,%d: CRYPTO_F_SKBUF src != dst not implemented\n",
3814 +                                       __FILE__, __LINE__);
3815 +#endif
3816 +       }
3817 +       pci_unmap_buf(sc, &cmd->src);
3818 +err_srcmap1:
3819 +       HIFN_UNLOCK(sc);
3820 +       return (err);
3821 +}
3822 +
3823 +static void
3824 +hifn_tick(unsigned long arg)
3825 +{
3826 +       struct hifn_softc *sc;
3827 +       unsigned long l_flags;
3828 +
3829 +       if (arg >= HIFN_MAX_CHIPS)
3830 +               return;
3831 +       sc = hifn_chip_idx[arg];
3832 +       if (!sc)
3833 +               return;
3834 +
3835 +       HIFN_LOCK(sc);
3836 +       if (sc->sc_active == 0) {
3837 +               struct hifn_dma *dma = sc->sc_dma;
3838 +               u_int32_t r = 0;
3839 +
3840 +               if (dma->cmdu == 0 && sc->sc_c_busy) {
3841 +                       sc->sc_c_busy = 0;
3842 +                       r |= HIFN_DMACSR_C_CTRL_DIS;
3843 +               }
3844 +               if (dma->srcu == 0 && sc->sc_s_busy) {
3845 +                       sc->sc_s_busy = 0;
3846 +                       r |= HIFN_DMACSR_S_CTRL_DIS;
3847 +               }
3848 +               if (dma->dstu == 0 && sc->sc_d_busy) {
3849 +                       sc->sc_d_busy = 0;
3850 +                       r |= HIFN_DMACSR_D_CTRL_DIS;
3851 +               }
3852 +               if (dma->resu == 0 && sc->sc_r_busy) {
3853 +                       sc->sc_r_busy = 0;
3854 +                       r |= HIFN_DMACSR_R_CTRL_DIS;
3855 +               }
3856 +               if (r)
3857 +                       WRITE_REG_1(sc, HIFN_1_DMA_CSR, r);
3858 +       } else
3859 +               sc->sc_active--;
3860 +       HIFN_UNLOCK(sc);
3861 +       mod_timer(&sc->sc_tickto, jiffies + HZ);
3862 +}
3863 +
3864 +static irqreturn_t
3865 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
3866 +hifn_intr(int irq, void *arg)
3867 +#else
3868 +hifn_intr(int irq, void *arg, struct pt_regs *regs)
3869 +#endif
3870 +{
3871 +       struct hifn_softc *sc = arg;
3872 +       struct hifn_dma *dma;
3873 +       u_int32_t dmacsr, restart;
3874 +       int i, u;
3875 +       unsigned long l_flags;
3876 +
3877 +       dmacsr = READ_REG_1(sc, HIFN_1_DMA_CSR);
3878 +
3879 +       /* Nothing in the DMA unit interrupted */
3880 +       if ((dmacsr & sc->sc_dmaier) == 0)
3881 +               return IRQ_NONE;
3882 +
3883 +       HIFN_LOCK(sc);
3884 +
3885 +       dma = sc->sc_dma;
3886 +
3887 +#ifdef HIFN_DEBUG
3888 +       if (hifn_debug) {
3889 +               device_printf(sc->sc_dev,
3890 +                   "irq: stat %08x ien %08x damier %08x i %d/%d/%d/%d k %d/%d/%d/%d u %d/%d/%d/%d\n",
3891 +                   dmacsr, READ_REG_1(sc, HIFN_1_DMA_IER), sc->sc_dmaier,
3892 +                   dma->cmdi, dma->srci, dma->dsti, dma->resi,
3893 +                   dma->cmdk, dma->srck, dma->dstk, dma->resk,
3894 +                   dma->cmdu, dma->srcu, dma->dstu, dma->resu);
3895 +       }
3896 +#endif
3897 +
3898 +       WRITE_REG_1(sc, HIFN_1_DMA_CSR, dmacsr & sc->sc_dmaier);
3899 +
3900 +       if ((sc->sc_flags & HIFN_HAS_PUBLIC) &&
3901 +           (dmacsr & HIFN_DMACSR_PUBDONE))
3902 +               WRITE_REG_1(sc, HIFN_1_PUB_STATUS,
3903 +                   READ_REG_1(sc, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE);
3904 +
3905 +       restart = dmacsr & (HIFN_DMACSR_D_OVER | HIFN_DMACSR_R_OVER);
3906 +       if (restart)
3907 +               device_printf(sc->sc_dev, "overrun %x\n", dmacsr);
3908 +
3909 +       if (sc->sc_flags & HIFN_IS_7811) {
3910 +               if (dmacsr & HIFN_DMACSR_ILLR)
3911 +                       device_printf(sc->sc_dev, "illegal read\n");
3912 +               if (dmacsr & HIFN_DMACSR_ILLW)
3913 +                       device_printf(sc->sc_dev, "illegal write\n");
3914 +       }
3915 +
3916 +       restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT |
3917 +           HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT);
3918 +       if (restart) {
3919 +               device_printf(sc->sc_dev, "abort, resetting.\n");
3920 +               hifnstats.hst_abort++;
3921 +               hifn_abort(sc);
3922 +               HIFN_UNLOCK(sc);
3923 +               return IRQ_HANDLED;
3924 +       }
3925 +
3926 +       if ((dmacsr & HIFN_DMACSR_C_WAIT) && (dma->cmdu == 0)) {
3927 +               /*
3928 +                * If no slots to process and we receive a "waiting on
3929 +                * command" interrupt, we disable the "waiting on command"
3930 +                * (by clearing it).
3931 +                */
3932 +               sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
3933 +               WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
3934 +       }
3935 +
3936 +       /* clear the rings */
3937 +       i = dma->resk; u = dma->resu;
3938 +       while (u != 0) {
3939 +               HIFN_RESR_SYNC(sc, i,
3940 +                   BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3941 +               if (dma->resr[i].l & htole32(HIFN_D_VALID)) {
3942 +                       HIFN_RESR_SYNC(sc, i,
3943 +                           BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3944 +                       break;
3945 +               }
3946 +
3947 +               if (i != HIFN_D_RES_RSIZE) {
3948 +                       struct hifn_command *cmd;
3949 +                       u_int8_t *macbuf = NULL;
3950 +
3951 +                       HIFN_RES_SYNC(sc, i, BUS_DMASYNC_POSTREAD);
3952 +                       cmd = dma->hifn_commands[i];
3953 +                       KASSERT(cmd != NULL,
3954 +                               ("hifn_intr: null command slot %u", i));
3955 +                       dma->hifn_commands[i] = NULL;
3956 +
3957 +                       if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
3958 +                               macbuf = dma->result_bufs[i];
3959 +                               macbuf += 12;
3960 +                       }
3961 +
3962 +                       hifn_callback(sc, cmd, macbuf);
3963 +                       hifnstats.hst_opackets++;
3964 +                       u--;
3965 +               }
3966 +
3967 +               if (++i == (HIFN_D_RES_RSIZE + 1))
3968 +                       i = 0;
3969 +       }
3970 +       dma->resk = i; dma->resu = u;
3971 +
3972 +       i = dma->srck; u = dma->srcu;
3973 +       while (u != 0) {
3974 +               if (i == HIFN_D_SRC_RSIZE)
3975 +                       i = 0;
3976 +               HIFN_SRCR_SYNC(sc, i,
3977 +                   BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3978 +               if (dma->srcr[i].l & htole32(HIFN_D_VALID)) {
3979 +                       HIFN_SRCR_SYNC(sc, i,
3980 +                           BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3981 +                       break;
3982 +               }
3983 +               i++, u--;
3984 +       }
3985 +       dma->srck = i; dma->srcu = u;
3986 +
3987 +       i = dma->cmdk; u = dma->cmdu;
3988 +       while (u != 0) {
3989 +               HIFN_CMDR_SYNC(sc, i,
3990 +                   BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3991 +               if (dma->cmdr[i].l & htole32(HIFN_D_VALID)) {
3992 +                       HIFN_CMDR_SYNC(sc, i,
3993 +                           BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3994 +                       break;
3995 +               }
3996 +               if (i != HIFN_D_CMD_RSIZE) {
3997 +                       u--;
3998 +                       HIFN_CMD_SYNC(sc, i, BUS_DMASYNC_POSTWRITE);
3999 +               }
4000 +               if (++i == (HIFN_D_CMD_RSIZE + 1))
4001 +                       i = 0;
4002 +       }
4003 +       dma->cmdk = i; dma->cmdu = u;
4004 +
4005 +       HIFN_UNLOCK(sc);
4006 +
4007 +       if (sc->sc_needwakeup) {                /* XXX check high watermark */
4008 +               int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ);
4009 +#ifdef HIFN_DEBUG
4010 +               if (hifn_debug)
4011 +                       device_printf(sc->sc_dev,
4012 +                               "wakeup crypto (%x) u %d/%d/%d/%d\n",
4013 +                               sc->sc_needwakeup,
4014 +                               dma->cmdu, dma->srcu, dma->dstu, dma->resu);
4015 +#endif
4016 +               sc->sc_needwakeup &= ~wakeup;
4017 +               crypto_unblock(sc->sc_cid, wakeup);
4018 +       }
4019 +
4020 +       return IRQ_HANDLED;
4021 +}
4022 +
4023 +/*
4024 + * Allocate a new 'session' and return an encoded session id.  'sidp'
4025 + * contains our registration id, and should contain an encoded session
4026 + * id on successful allocation.
4027 + */
4028 +static int
4029 +hifn_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
4030 +{
4031 +       struct hifn_softc *sc = device_get_softc(dev);
4032 +       struct cryptoini *c;
4033 +       int mac = 0, cry = 0, sesn;
4034 +       struct hifn_session *ses = NULL;
4035 +       unsigned long l_flags;
4036 +
4037 +       DPRINTF("%s()\n", __FUNCTION__);
4038 +
4039 +       KASSERT(sc != NULL, ("hifn_newsession: null softc"));
4040 +       if (sidp == NULL || cri == NULL || sc == NULL) {
4041 +               DPRINTF("%s,%d: %s - EINVAL\n", __FILE__, __LINE__, __FUNCTION__);
4042 +               return (EINVAL);
4043 +       }
4044 +
4045 +       HIFN_LOCK(sc);
4046 +       if (sc->sc_sessions == NULL) {
4047 +               ses = sc->sc_sessions = (struct hifn_session *)kmalloc(sizeof(*ses),
4048 +                               SLAB_ATOMIC);
4049 +               if (ses == NULL) {
4050 +                       HIFN_UNLOCK(sc);
4051 +                       return (ENOMEM);
4052 +               }
4053 +               sesn = 0;
4054 +               sc->sc_nsessions = 1;
4055 +       } else {
4056 +               for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
4057 +                       if (!sc->sc_sessions[sesn].hs_used) {
4058 +                               ses = &sc->sc_sessions[sesn];
4059 +                               break;
4060 +                       }
4061 +               }
4062 +
4063 +               if (ses == NULL) {
4064 +                       sesn = sc->sc_nsessions;
4065 +                       ses = (struct hifn_session *)kmalloc((sesn + 1) * sizeof(*ses),
4066 +                                       SLAB_ATOMIC);
4067 +                       if (ses == NULL) {
4068 +                               HIFN_UNLOCK(sc);
4069 +                               return (ENOMEM);
4070 +                       }
4071 +                       bcopy(sc->sc_sessions, ses, sesn * sizeof(*ses));
4072 +                       bzero(sc->sc_sessions, sesn * sizeof(*ses));
4073 +                       kfree(sc->sc_sessions);
4074 +                       sc->sc_sessions = ses;
4075 +                       ses = &sc->sc_sessions[sesn];
4076 +                       sc->sc_nsessions++;
4077 +               }
4078 +       }
4079 +       HIFN_UNLOCK(sc);
4080 +
4081 +       bzero(ses, sizeof(*ses));
4082 +       ses->hs_used = 1;
4083 +
4084 +       for (c = cri; c != NULL; c = c->cri_next) {
4085 +               switch (c->cri_alg) {
4086 +               case CRYPTO_MD5:
4087 +               case CRYPTO_SHA1:
4088 +               case CRYPTO_MD5_HMAC:
4089 +               case CRYPTO_SHA1_HMAC:
4090 +                       if (mac) {
4091 +                               DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
4092 +                               return (EINVAL);
4093 +                       }
4094 +                       mac = 1;
4095 +                       ses->hs_mlen = c->cri_mlen;
4096 +                       if (ses->hs_mlen == 0) {
4097 +                               switch (c->cri_alg) {
4098 +                               case CRYPTO_MD5:
4099 +                               case CRYPTO_MD5_HMAC:
4100 +                                       ses->hs_mlen = 16;
4101 +                                       break;
4102 +                               case CRYPTO_SHA1:
4103 +                               case CRYPTO_SHA1_HMAC:
4104 +                                       ses->hs_mlen = 20;
4105 +                                       break;
4106 +                               }
4107 +                       }
4108 +                       break;
4109 +               case CRYPTO_DES_CBC:
4110 +               case CRYPTO_3DES_CBC:
4111 +               case CRYPTO_AES_CBC:
4112 +                       /* XXX this may read fewer, does it matter? */
4113 +                       read_random(ses->hs_iv,
4114 +                               c->cri_alg == CRYPTO_AES_CBC ?
4115 +                                       HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
4116 +                       /*FALLTHROUGH*/
4117 +               case CRYPTO_ARC4:
4118 +                       if (cry) {
4119 +                               DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
4120 +                               return (EINVAL);
4121 +                       }
4122 +                       cry = 1;
4123 +                       break;
4124 +               default:
4125 +                       DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
4126 +                       return (EINVAL);
4127 +               }
4128 +       }
4129 +       if (mac == 0 && cry == 0) {
4130 +               DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
4131 +               return (EINVAL);
4132 +       }
4133 +
4134 +       *sidp = HIFN_SID(device_get_unit(sc->sc_dev), sesn);
4135 +
4136 +       return (0);
4137 +}
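For orientation, the following is a minimal consumer-side sketch of how a session allocated by hifn_newsession() is normally driven through OCF. The entry points (crypto_newsession, crypto_getreq, crypto_dispatch, declared in cryptodev.h) are the standard OCF API, but the helper names, the AES-only setup, and the elided IV handling below are illustrative assumptions, not part of this patch.

    /* Hypothetical OCF consumer; names and flow are illustrative only. */
    static int example_done(struct cryptop *crp)
    {
            return crp->crp_etype;                  /* 0 on success */
    }

    static int example_encrypt(caddr_t buf, int len, caddr_t key)
    {
            struct cryptoini cri;
            struct cryptop *crp;
            u_int64_t sid;

            memset(&cri, 0, sizeof(cri));
            cri.cri_alg  = CRYPTO_AES_CBC;
            cri.cri_klen = 128;                     /* key length in bits */
            cri.cri_key  = key;
            if (crypto_newsession(&sid, &cri, 0))   /* may land in hifn_newsession() */
                    return -1;

            crp = crypto_getreq(1);                 /* one cryptodesc */
            if (crp == NULL)
                    return -1;
            crp->crp_sid      = sid;
            crp->crp_ilen     = len;
            crp->crp_flags    = 0;                  /* plain contiguous buffer */
            crp->crp_buf      = buf;
            crp->crp_callback = example_done;
            crp->crp_desc->crd_alg   = CRYPTO_AES_CBC;
            crp->crp_desc->crd_len   = len;
            crp->crp_desc->crd_flags = CRD_F_ENCRYPT;
            /* IV placement (crd_skip/crd_inject) elided for brevity */
            return crypto_dispatch(crp);            /* routed to hifn_process() */
    }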
4138 +
4139 +/*
4140 + * Deallocate a session.
4141 + * XXX this routine should run a zero'd mac/encrypt key into context ram.
4142 + * XXX to blow away any keys already stored there.
4143 + */
4144 +static int
4145 +hifn_freesession(device_t dev, u_int64_t tid)
4146 +{
4147 +       struct hifn_softc *sc = device_get_softc(dev);
4148 +       int session, error;
4149 +       u_int32_t sid = CRYPTO_SESID2LID(tid);
4150 +       unsigned long l_flags;
4151 +
4152 +       DPRINTF("%s()\n", __FUNCTION__);
4153 +
4154 +       KASSERT(sc != NULL, ("hifn_freesession: null softc"));
4155 +       if (sc == NULL) {
4156 +               DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
4157 +               return (EINVAL);
4158 +       }
4159 +
4160 +       HIFN_LOCK(sc);
4161 +       session = HIFN_SESSION(sid);
4162 +       if (session < sc->sc_nsessions) {
4163 +               bzero(&sc->sc_sessions[session], sizeof(struct hifn_session));
4164 +               error = 0;
4165 +       } else {
4166 +               DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
4167 +               error = EINVAL;
4168 +       }
4169 +       HIFN_UNLOCK(sc);
4170 +
4171 +       return (error);
4172 +}
4173 +
4174 +static int
4175 +hifn_process(device_t dev, struct cryptop *crp, int hint)
4176 +{
4177 +       struct hifn_softc *sc = device_get_softc(dev);
4178 +       struct hifn_command *cmd = NULL;
4179 +       int session, err, ivlen;
4180 +       struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
4181 +
4182 +       DPRINTF("%s()\n", __FUNCTION__);
4183 +
4184 +       if (crp == NULL || crp->crp_callback == NULL) {
4185 +               hifnstats.hst_invalid++;
4186 +               DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
4187 +               return (EINVAL);
4188 +       }
4189 +       session = HIFN_SESSION(crp->crp_sid);
4190 +
4191 +       if (sc == NULL || session >= sc->sc_nsessions) {
4192 +               DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
4193 +               err = EINVAL;
4194 +               goto errout;
4195 +       }
4196 +
4197 +       cmd = kmalloc(sizeof(struct hifn_command), SLAB_ATOMIC);
4198 +       if (cmd == NULL) {
4199 +               hifnstats.hst_nomem++;
4200 +               err = ENOMEM;
4201 +               goto errout;
4202 +       }
4203 +       memset(cmd, 0, sizeof(*cmd));
4204 +
4205 +       if (crp->crp_flags & CRYPTO_F_SKBUF) {
4206 +               cmd->src_skb = (struct sk_buff *)crp->crp_buf;
4207 +               cmd->dst_skb = (struct sk_buff *)crp->crp_buf;
4208 +       } else if (crp->crp_flags & CRYPTO_F_IOV) {
4209 +               cmd->src_io = (struct uio *)crp->crp_buf;
4210 +               cmd->dst_io = (struct uio *)crp->crp_buf;
4211 +       } else {
4212 +               cmd->src_buf = crp->crp_buf;
4213 +               cmd->dst_buf = crp->crp_buf;
4214 +       }
4215 +
4216 +       crd1 = crp->crp_desc;
4217 +       if (crd1 == NULL) {
4218 +               DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
4219 +               err = EINVAL;
4220 +               goto errout;
4221 +       }
4222 +       crd2 = crd1->crd_next;
4223 +
4224 +       if (crd2 == NULL) {
4225 +               if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
4226 +                   crd1->crd_alg == CRYPTO_SHA1_HMAC ||
4227 +                   crd1->crd_alg == CRYPTO_SHA1 ||
4228 +                   crd1->crd_alg == CRYPTO_MD5) {
4229 +                       maccrd = crd1;
4230 +                       enccrd = NULL;
4231 +               } else if (crd1->crd_alg == CRYPTO_DES_CBC ||
4232 +                   crd1->crd_alg == CRYPTO_3DES_CBC ||
4233 +                   crd1->crd_alg == CRYPTO_AES_CBC ||
4234 +                   crd1->crd_alg == CRYPTO_ARC4) {
4235 +                       if ((crd1->crd_flags & CRD_F_ENCRYPT) == 0)
4236 +                               cmd->base_masks |= HIFN_BASE_CMD_DECODE;
4237 +                       maccrd = NULL;
4238 +                       enccrd = crd1;
4239 +               } else {
4240 +                       DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
4241 +                       err = EINVAL;
4242 +                       goto errout;
4243 +               }
4244 +       } else {
4245 +               if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
4246 +                     crd1->crd_alg == CRYPTO_SHA1_HMAC ||
4247 +                     crd1->crd_alg == CRYPTO_MD5 ||
4248 +                     crd1->crd_alg == CRYPTO_SHA1) &&
4249 +                   (crd2->crd_alg == CRYPTO_DES_CBC ||
4250 +                    crd2->crd_alg == CRYPTO_3DES_CBC ||
4251 +                    crd2->crd_alg == CRYPTO_AES_CBC ||
4252 +                    crd2->crd_alg == CRYPTO_ARC4) &&
4253 +                   ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
4254 +                       cmd->base_masks = HIFN_BASE_CMD_DECODE;
4255 +                       maccrd = crd1;
4256 +                       enccrd = crd2;
4257 +               } else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
4258 +                    crd1->crd_alg == CRYPTO_ARC4 ||
4259 +                    crd1->crd_alg == CRYPTO_3DES_CBC ||
4260 +                    crd1->crd_alg == CRYPTO_AES_CBC) &&
4261 +                   (crd2->crd_alg == CRYPTO_MD5_HMAC ||
4262 +                     crd2->crd_alg == CRYPTO_SHA1_HMAC ||
4263 +                     crd2->crd_alg == CRYPTO_MD5 ||
4264 +                     crd2->crd_alg == CRYPTO_SHA1) &&
4265 +                   (crd1->crd_flags & CRD_F_ENCRYPT)) {
4266 +                       enccrd = crd1;
4267 +                       maccrd = crd2;
4268 +               } else {
4269 +                       /*
4270 +                        * We cannot order the 7751 as requested
4271 +                        */
4272 +                       DPRINTF("%s,%d: %s %d,%d,%d - EINVAL\n",__FILE__,__LINE__,__FUNCTION__, crd1->crd_alg, crd2->crd_alg, crd1->crd_flags & CRD_F_ENCRYPT);
4273 +                       err = EINVAL;
4274 +                       goto errout;
4275 +               }
4276 +       }
4277 +
4278 +       if (enccrd) {
4279 +               cmd->enccrd = enccrd;
4280 +               cmd->base_masks |= HIFN_BASE_CMD_CRYPT;
4281 +               switch (enccrd->crd_alg) {
4282 +               case CRYPTO_ARC4:
4283 +                       cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_RC4;
4284 +                       break;
4285 +               case CRYPTO_DES_CBC:
4286 +                       cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_DES |
4287 +                           HIFN_CRYPT_CMD_MODE_CBC |
4288 +                           HIFN_CRYPT_CMD_NEW_IV;
4289 +                       break;
4290 +               case CRYPTO_3DES_CBC:
4291 +                       cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_3DES |
4292 +                           HIFN_CRYPT_CMD_MODE_CBC |
4293 +                           HIFN_CRYPT_CMD_NEW_IV;
4294 +                       break;
4295 +               case CRYPTO_AES_CBC:
4296 +                       cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_AES |
4297 +                           HIFN_CRYPT_CMD_MODE_CBC |
4298 +                           HIFN_CRYPT_CMD_NEW_IV;
4299 +                       break;
4300 +               default:
4301 +                       DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
4302 +                       err = EINVAL;
4303 +                       goto errout;
4304 +               }
4305 +               if (enccrd->crd_alg != CRYPTO_ARC4) {
4306 +                       ivlen = ((enccrd->crd_alg == CRYPTO_AES_CBC) ?
4307 +                               HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
4308 +                       if (enccrd->crd_flags & CRD_F_ENCRYPT) {
4309 +                               if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
4310 +                                       bcopy(enccrd->crd_iv, cmd->iv, ivlen);
4311 +                               else
4312 +                                       bcopy(sc->sc_sessions[session].hs_iv,
4313 +                                           cmd->iv, ivlen);
4314 +
4315 +                               if ((enccrd->crd_flags & CRD_F_IV_PRESENT)
4316 +                                   == 0) {
4317 +                                       crypto_copyback(crp->crp_flags,
4318 +                                           crp->crp_buf, enccrd->crd_inject,
4319 +                                           ivlen, cmd->iv);
4320 +                               }
4321 +                       } else {
4322 +                               if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
4323 +                                       bcopy(enccrd->crd_iv, cmd->iv, ivlen);
4324 +                               else {
4325 +                                       crypto_copydata(crp->crp_flags,
4326 +                                           crp->crp_buf, enccrd->crd_inject,
4327 +                                           ivlen, cmd->iv);
4328 +                               }
4329 +                       }
4330 +               }
4331 +
4332 +               if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT)
4333 +                       cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
4334 +               cmd->ck = enccrd->crd_key;
4335 +               cmd->cklen = enccrd->crd_klen >> 3;
4336 +               cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
4337 +
4338 +               /* 
4339 +                * Need to specify the size for the AES key in the masks.
4340 +                */
4341 +               if ((cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) ==
4342 +                   HIFN_CRYPT_CMD_ALG_AES) {
4343 +                       switch (cmd->cklen) {
4344 +                       case 16:
4345 +                               cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_128;
4346 +                               break;
4347 +                       case 24:
4348 +                               cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_192;
4349 +                               break;
4350 +                       case 32:
4351 +                               cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_256;
4352 +                               break;
4353 +                       default:
4354 +                               DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
4355 +                               err = EINVAL;
4356 +                               goto errout;
4357 +                       }
4358 +               }
4359 +       }
4360 +
4361 +       if (maccrd) {
4362 +               cmd->maccrd = maccrd;
4363 +               cmd->base_masks |= HIFN_BASE_CMD_MAC;
4364 +
4365 +               switch (maccrd->crd_alg) {
4366 +               case CRYPTO_MD5:
4367 +                       cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
4368 +                           HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
4369 +                           HIFN_MAC_CMD_POS_IPSEC;
4370 +                       break;
4371 +               case CRYPTO_MD5_HMAC:
4372 +                       cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
4373 +                           HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
4374 +                           HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
4375 +                       break;
4376 +               case CRYPTO_SHA1:
4377 +                       cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
4378 +                           HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
4379 +                           HIFN_MAC_CMD_POS_IPSEC;
4380 +                       break;
4381 +               case CRYPTO_SHA1_HMAC:
4382 +                       cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
4383 +                           HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
4384 +                           HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
4385 +                       break;
4386 +               }
4387 +
4388 +               if (maccrd->crd_alg == CRYPTO_SHA1_HMAC ||
4389 +                    maccrd->crd_alg == CRYPTO_MD5_HMAC) {
4390 +                       cmd->mac_masks |= HIFN_MAC_CMD_NEW_KEY;
4391 +                       bcopy(maccrd->crd_key, cmd->mac, maccrd->crd_klen >> 3);
4392 +                       bzero(cmd->mac + (maccrd->crd_klen >> 3),
4393 +                           HIFN_MAC_KEY_LENGTH - (maccrd->crd_klen >> 3));
4394 +               }
4395 +       }
4396 +
4397 +       cmd->crp = crp;
4398 +       cmd->session_num = session;
4399 +       cmd->softc = sc;
4400 +
4401 +       err = hifn_crypto(sc, cmd, crp, hint);
4402 +       if (!err) {
4403 +               return 0;
4404 +       } else if (err == ERESTART) {
4405 +               /*
4406 +                * There weren't enough resources to dispatch the request
4407 +                * to the part.  Notify the caller so they'll requeue this
4408 +                * request and resubmit it again soon.
4409 +                */
4410 +#ifdef HIFN_DEBUG
4411 +               if (hifn_debug)
4412 +                       device_printf(sc->sc_dev, "requeue request\n");
4413 +#endif
4414 +               kfree(cmd);
4415 +               sc->sc_needwakeup |= CRYPTO_SYMQ;
4416 +               return (err);
4417 +       }
4418 +
4419 +errout:
4420 +       if (cmd != NULL)
4421 +               kfree(cmd);
4422 +       if (err == EINVAL)
4423 +               hifnstats.hst_invalid++;
4424 +       else
4425 +               hifnstats.hst_nomem++;
4426 +       crp->crp_etype = err;
4427 +       crypto_done(crp);
4428 +       return (err);
4429 +}
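A note on the ERESTART path above: the request is neither completed nor failed. The command block is freed, CRYPTO_SYMQ is latched in sc_needwakeup, and ERESTART is returned so the OCF core holds and re-queues the operation. Once descriptors drain, the sc_needwakeup check in hifn_intr() calls crypto_unblock(sc->sc_cid, wakeup), which tells the core to resubmit the blocked queue back into hifn_process().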
4430 +
4431 +static void
4432 +hifn_abort(struct hifn_softc *sc)
4433 +{
4434 +       struct hifn_dma *dma = sc->sc_dma;
4435 +       struct hifn_command *cmd;
4436 +       struct cryptop *crp;
4437 +       int i, u;
4438 +
4439 +       DPRINTF("%s()\n", __FUNCTION__);
4440 +
4441 +       i = dma->resk; u = dma->resu;
4442 +       while (u != 0) {
4443 +               cmd = dma->hifn_commands[i];
4444 +               KASSERT(cmd != NULL, ("hifn_abort: null command slot %u", i));
4445 +               dma->hifn_commands[i] = NULL;
4446 +               crp = cmd->crp;
4447 +
4448 +               if ((dma->resr[i].l & htole32(HIFN_D_VALID)) == 0) {
4449 +                       /* Salvage what we can. */
4450 +                       u_int8_t *macbuf;
4451 +
4452 +                       if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
4453 +                               macbuf = dma->result_bufs[i];
4454 +                               macbuf += 12;
4455 +                       } else
4456 +                               macbuf = NULL;
4457 +                       hifnstats.hst_opackets++;
4458 +                       hifn_callback(sc, cmd, macbuf);
4459 +               } else {
4460 +#if 0
4461 +                       if (cmd->src_map == cmd->dst_map) {
4462 +                               bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
4463 +                                   BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
4464 +                       } else {
4465 +                               bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
4466 +                                   BUS_DMASYNC_POSTWRITE);
4467 +                               bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
4468 +                                   BUS_DMASYNC_POSTREAD);
4469 +                       }
4470 +#endif
4471 +
4472 +                       if (cmd->src_skb != cmd->dst_skb) {
4473 +#ifdef NOTYET
4474 +                               m_freem(cmd->src_m);
4475 +                               crp->crp_buf = (caddr_t)cmd->dst_m;
4476 +#else
4477 +                               device_printf(sc->sc_dev,
4478 +                                               "%s,%d: CRYPTO_F_SKBUF src != dst not implemented\n",
4479 +                                               __FILE__, __LINE__);
4480 +#endif
4481 +                       }
4482 +
4483 +                       /* non-shared buffers cannot be restarted */
4484 +                       if (cmd->src_map != cmd->dst_map) {
4485 +                               /*
4486 +                                * XXX should be EAGAIN, delayed until
4487 +                                * after the reset.
4488 +                                */
4489 +                               crp->crp_etype = ENOMEM;
4490 +                               pci_unmap_buf(sc, &cmd->dst);
4491 +                       } else
4492 +                               crp->crp_etype = ENOMEM;
4493 +
4494 +                       pci_unmap_buf(sc, &cmd->src);
4495 +
4496 +                       kfree(cmd);
4497 +                       if (crp->crp_etype != EAGAIN)
4498 +                               crypto_done(crp);
4499 +               }
4500 +
4501 +               if (++i == HIFN_D_RES_RSIZE)
4502 +                       i = 0;
4503 +               u--;
4504 +       }
4505 +       dma->resk = i; dma->resu = u;
4506 +
4507 +       hifn_reset_board(sc, 1);
4508 +       hifn_init_dma(sc);
4509 +       hifn_init_pci_registers(sc);
4510 +}
4511 +
4512 +static void
4513 +hifn_callback(struct hifn_softc *sc, struct hifn_command *cmd, u_int8_t *macbuf)
4514 +{
4515 +       struct hifn_dma *dma = sc->sc_dma;
4516 +       struct cryptop *crp = cmd->crp;
4517 +       struct cryptodesc *crd;
4518 +       int i, u, ivlen;
4519 +
4520 +       DPRINTF("%s()\n", __FUNCTION__);
4521 +
4522 +#if 0
4523 +       if (cmd->src_map == cmd->dst_map) {
4524 +               bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
4525 +                   BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
4526 +       } else {
4527 +               bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
4528 +                   BUS_DMASYNC_POSTWRITE);
4529 +               bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
4530 +                   BUS_DMASYNC_POSTREAD);
4531 +       }
4532 +#endif
4533 +
4534 +       if (crp->crp_flags & CRYPTO_F_SKBUF) {
4535 +               if (cmd->src_skb != cmd->dst_skb) {
4536 +#ifdef NOTYET
4537 +                       crp->crp_buf = (caddr_t)cmd->dst_m;
4538 +                       totlen = cmd->src_mapsize;
4539 +                       for (m = cmd->dst_m; m != NULL; m = m->m_next) {
4540 +                               if (totlen < m->m_len) {
4541 +                                       m->m_len = totlen;
4542 +                                       totlen = 0;
4543 +                               } else
4544 +                                       totlen -= m->m_len;
4545 +                       }
4546 +                       cmd->dst_m->m_pkthdr.len = cmd->src_m->m_pkthdr.len;
4547 +                       m_freem(cmd->src_m);
4548 +#else
4549 +                       device_printf(sc->sc_dev,
4550 +                                       "%s,%d: CRYPTO_F_SKBUF src != dst not implemented\n",
4551 +                                       __FILE__, __LINE__);
4552 +#endif
4553 +               }
4554 +       }
4555 +
4556 +       if (cmd->sloplen != 0) {
4557 +               crypto_copyback(crp->crp_flags, crp->crp_buf,
4558 +                   cmd->src_mapsize - cmd->sloplen, cmd->sloplen,
4559 +                   (caddr_t)&dma->slop[cmd->slopidx]);
4560 +       }
4561 +
4562 +       i = dma->dstk; u = dma->dstu;
4563 +       while (u != 0) {
4564 +               if (i == HIFN_D_DST_RSIZE)
4565 +                       i = 0;
4566 +#if 0
4567 +               bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
4568 +                   BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
4569 +#endif
4570 +               if (dma->dstr[i].l & htole32(HIFN_D_VALID)) {
4571 +#if 0
4572 +                       bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
4573 +                           BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4574 +#endif
4575 +                       break;
4576 +               }
4577 +               i++, u--;
4578 +       }
4579 +       dma->dstk = i; dma->dstu = u;
4580 +
4581 +       hifnstats.hst_obytes += cmd->dst_mapsize;
4582 +
4583 +       if ((cmd->base_masks & (HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE)) ==
4584 +           HIFN_BASE_CMD_CRYPT) {
4585 +               for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
4586 +                       if (crd->crd_alg != CRYPTO_DES_CBC &&
4587 +                           crd->crd_alg != CRYPTO_3DES_CBC &&
4588 +                           crd->crd_alg != CRYPTO_AES_CBC)
4589 +                               continue;
4590 +                       ivlen = ((crd->crd_alg == CRYPTO_AES_CBC) ?
4591 +                               HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
4592 +                       crypto_copydata(crp->crp_flags, crp->crp_buf,
4593 +                           crd->crd_skip + crd->crd_len - ivlen, ivlen,
4594 +                           cmd->softc->sc_sessions[cmd->session_num].hs_iv);
4595 +                       break;
4596 +               }
4597 +       }
4598 +
4599 +       if (macbuf != NULL) {
4600 +               for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
4601 +                        int len;
4602 +
4603 +                       if (crd->crd_alg != CRYPTO_MD5 &&
4604 +                           crd->crd_alg != CRYPTO_SHA1 &&
4605 +                           crd->crd_alg != CRYPTO_MD5_HMAC &&
4606 +                           crd->crd_alg != CRYPTO_SHA1_HMAC) {
4607 +                               continue;
4608 +                       }
4609 +                       len = cmd->softc->sc_sessions[cmd->session_num].hs_mlen;
4610 +                       crypto_copyback(crp->crp_flags, crp->crp_buf,
4611 +                           crd->crd_inject, len, macbuf);
4612 +                       break;
4613 +               }
4614 +       }
4615 +
4616 +       if (cmd->src_map != cmd->dst_map)
4617 +               pci_unmap_buf(sc, &cmd->dst);
4618 +       pci_unmap_buf(sc, &cmd->src);
4619 +       kfree(cmd);
4620 +       crypto_done(crp);
4621 +}
4622 +
4623 +/*
4624 + * 7811 PB3 rev/2 parts lock-up on burst writes to Group 0
4625 + * and Group 1 registers; avoid conditions that could create
4626 + * burst writes by doing a read in between the writes.
4627 + *
4628 + * NB: The read we interpose is always to the same register;
4629 + *     we do this because reading from an arbitrary (e.g. last)
4630 + *     register may not always work.
4631 + */
4632 +static void
4633 +hifn_write_reg_0(struct hifn_softc *sc, bus_size_t reg, u_int32_t val)
4634 +{
4635 +       if (sc->sc_flags & HIFN_IS_7811) {
4636 +               if (sc->sc_bar0_lastreg == reg - 4)
4637 +                       readl(sc->sc_bar0 + HIFN_0_PUCNFG);
4638 +               sc->sc_bar0_lastreg = reg;
4639 +       }
4640 +       writel(val, sc->sc_bar0 + reg);
4641 +}
4642 +
4643 +static void
4644 +hifn_write_reg_1(struct hifn_softc *sc, bus_size_t reg, u_int32_t val)
4645 +{
4646 +       if (sc->sc_flags & HIFN_IS_7811) {
4647 +               if (sc->sc_bar1_lastreg == reg - 4)
4648 +                       readl(sc->sc_bar1 + HIFN_1_REVID);
4649 +               sc->sc_bar1_lastreg = reg;
4650 +       }
4651 +       writel(val, sc->sc_bar1 + reg);
4652 +}
4653 +
4654 +
4655 +static struct pci_device_id hifn_pci_tbl[] = {
4656 +       { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7951,
4657 +         PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
4658 +       { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7955,
4659 +         PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
4660 +       { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7956,
4661 +         PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
4662 +       { PCI_VENDOR_NETSEC, PCI_PRODUCT_NETSEC_7751,
4663 +         PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
4664 +       { PCI_VENDOR_INVERTEX, PCI_PRODUCT_INVERTEX_AEON,
4665 +         PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
4666 +       { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7811,
4667 +         PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
4668 +       /*
4669 +        * Other vendors share this PCI ID as well, such as
4670 +        * http://www.powercrypt.com, and obviously they also
4671 +        * use the same key.
4672 +        */
4673 +       { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7751,
4674 +         PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
4675 +       { 0, 0, 0, 0, 0, 0, }
4676 +};
4677 +MODULE_DEVICE_TABLE(pci, hifn_pci_tbl);
4678 +
4679 +static struct pci_driver hifn_driver = {
4680 +       .name         = "hifn",
4681 +       .id_table     = hifn_pci_tbl,
4682 +       .probe        = hifn_probe,
4683 +       .remove       = hifn_remove,
4684 +       /* add PM stuff here one day */
4685 +};
4686 +
4687 +static int __init hifn_init (void)
4688 +{
4689 +       struct hifn_softc *sc = NULL;
4690 +       int rc;
4691 +
4692 +       DPRINTF("%s(%p)\n", __FUNCTION__, hifn_init);
4693 +
4694 +       rc = pci_register_driver(&hifn_driver);
4695 +       pci_register_driver_compat(&hifn_driver, rc);
4696 +
4697 +       return rc;
4698 +}
4699 +
4700 +static void __exit hifn_exit (void)
4701 +{
4702 +       pci_unregister_driver(&hifn_driver);
4703 +}
4704 +
4705 +module_init(hifn_init);
4706 +module_exit(hifn_exit);
4707 +
4708 +MODULE_LICENSE("BSD");
4709 +MODULE_AUTHOR("David McCullough <david_mccullough@securecomputing.com>");
4710 +MODULE_DESCRIPTION("OCF driver for hifn PCI crypto devices");
4711 --- /dev/null   2007-07-04 13:54:27.000000000 +1000
4712 +++ linux/crypto/ocf/hifn/hifnHIPP.c    2007-07-25 14:36:45.000000000 +1000
4713 @@ -0,0 +1,420 @@
4714 +/*-
4715 + * Driver for Hifn HIPP-I/II chipset
4716 + * Copyright (c) 2006 Michael Richardson <mcr@xelerance.com>
4717 + *
4718 + * Redistribution and use in source and binary forms, with or without
4719 + * modification, are permitted provided that the following conditions
4720 + * are met:
4721 + *
4722 + * 1. Redistributions of source code must retain the above copyright
4723 + *   notice, this list of conditions and the following disclaimer.
4724 + * 2. Redistributions in binary form must reproduce the above copyright
4725 + *   notice, this list of conditions and the following disclaimer in the
4726 + *   documentation and/or other materials provided with the distribution.
4727 + * 3. The name of the author may not be used to endorse or promote products
4728 + *   derived from this software without specific prior written permission.
4729 + *
4730 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
4731 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
4732 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
4733 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
4734 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
4735 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
4736 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
4737 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
4738 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
4739 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
4740 + *
4741 + * Effort sponsored by Hifn Inc.
4742 + *
4743 + */
4744 +
4745 +/*
4746 + * Driver for various Hifn encryption processors.
4747 + */
4748 +#ifndef AUTOCONF_INCLUDED
4749 +#include <linux/config.h>
4750 +#endif
4751 +#include <linux/module.h>
4752 +#include <linux/init.h>
4753 +#include <linux/list.h>
4754 +#include <linux/slab.h>
4755 +#include <linux/wait.h>
4756 +#include <linux/sched.h>
4757 +#include <linux/pci.h>
4758 +#include <linux/delay.h>
4759 +#include <linux/interrupt.h>
4760 +#include <linux/spinlock.h>
4761 +#include <linux/random.h>
4762 +#include <linux/version.h>
4763 +#include <linux/skbuff.h>
4764 +#include <linux/uio.h>
4765 +#include <linux/sysfs.h>
4766 +#include <linux/miscdevice.h>
4767 +#include <asm/io.h>
4768 +
4769 +#include <cryptodev.h>
4770 +
4771 +#include "hifnHIPPreg.h"
4772 +#include "hifnHIPPvar.h"
4773 +
4774 +#if 1
4775 +#define        DPRINTF(a...)   if (hipp_debug) { \
4776 +                                                       printk("%s: ", sc ? \
4777 +                                                               device_get_nameunit(sc->sc_dev) : "hifn"); \
4778 +                                                       printk(a); \
4779 +                                               } else
4780 +#else
4781 +#define        DPRINTF(a...)
4782 +#endif
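The trailing else in the debug variant of DPRINTF is deliberate: it makes DPRINTF(...); parse as a single statement even when used unbraced inside an if/else, matching how the macro is invoked throughout this driver.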
4783 +
4784 +typedef int bus_size_t;
4785 +
4786 +static inline int
4787 +pci_get_revid(struct pci_dev *dev)
4788 +{
4789 +       u8 rid = 0;
4790 +       pci_read_config_byte(dev, PCI_REVISION_ID, &rid);
4791 +       return rid;
4792 +}
4793 +
4794 +#define debug hipp_debug
4795 +int hipp_debug = 0;
4796 +module_param(hipp_debug, int, 0644);
4797 +MODULE_PARM_DESC(hipp_debug, "Enable debug");
4798 +
4799 +int hipp_maxbatch = 1;
4800 +module_param(hipp_maxbatch, int, 0644);
4801 +MODULE_PARM_DESC(hipp_maxbatch, "max ops to batch w/o interrupt");
4802 +
4803 +static int  hipp_probe(struct pci_dev *dev, const struct pci_device_id *ent);
4804 +static void hipp_remove(struct pci_dev *dev);
4805 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
4806 +static irqreturn_t hipp_intr(int irq, void *arg);
4807 +#else
4808 +static irqreturn_t hipp_intr(int irq, void *arg, struct pt_regs *regs);
4809 +#endif
4810 +
4811 +static int hipp_num_chips = 0;
4812 +static struct hipp_softc *hipp_chip_idx[HIPP_MAX_CHIPS];
4813 +
4814 +static int hipp_newsession(device_t, u_int32_t *, struct cryptoini *);
4815 +static int hipp_freesession(device_t, u_int64_t);
4816 +static int hipp_process(device_t, struct cryptop *, int);
4817 +
4818 +static device_method_t hipp_methods = {
4819 +       /* crypto device methods */
4820 +       DEVMETHOD(cryptodev_newsession, hipp_newsession),
4821 +       DEVMETHOD(cryptodev_freesession,hipp_freesession),
4822 +       DEVMETHOD(cryptodev_process,    hipp_process),
4823 +};
4824 +
4825 +static __inline u_int32_t
4826 +READ_REG(struct hipp_softc *sc, unsigned int barno, bus_size_t reg)
4827 +{
4828 +       u_int32_t v = readl(sc->sc_bar[barno] + reg);
4829 +       //sc->sc_bar0_lastreg = (bus_size_t) -1;
4830 +       return (v);
4831 +}
4832 +static __inline void
4833 +WRITE_REG(struct hipp_softc *sc, unsigned int barno, bus_size_t reg, u_int32_t val)
4834 +{
4835 +       writel(val, sc->sc_bar[barno] + reg);
4836 +}
4837 +
4838 +#define READ_REG_0(sc, reg)         READ_REG(sc, 0, reg)
4839 +#define WRITE_REG_0(sc, reg, val)   WRITE_REG(sc,0, reg, val)
4840 +#define READ_REG_1(sc, reg)         READ_REG(sc, 1, reg)
4841 +#define WRITE_REG_1(sc, reg, val)   WRITE_REG(sc,1, reg, val)
4842 +
4843 +static int
4844 +hipp_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
4845 +{
4846 +       return EINVAL;
4847 +}
4848 +
4849 +static int
4850 +hipp_freesession(device_t dev, u_int64_t tid)
4851 +{
4852 +       return EINVAL;
4853 +}
4854 +
4855 +static int
4856 +hipp_process(device_t dev, struct cryptop *crp, int hint)
4857 +{
4858 +       return EINVAL;
4859 +}
4860 +
4861 +static const char*
4862 +hipp_partname(struct hipp_softc *sc, char buf[128], size_t blen)
4863 +{
4864 +       char *n = NULL;
4865 +
4866 +       switch (pci_get_vendor(sc->sc_pcidev)) {
4867 +       case PCI_VENDOR_HIFN:
4868 +               switch (pci_get_device(sc->sc_pcidev)) {
4869 +               case PCI_PRODUCT_HIFN_7855:     n = "Hifn 7855"; break;
4870 +               case PCI_PRODUCT_HIFN_8155:     n = "Hifn 8155"; break;
4871 +               case PCI_PRODUCT_HIFN_6500:     n = "Hifn 6500"; break;
4872 +               }
4873 +       }
4874 +
4875 +       if(n==NULL) {
4876 +               snprintf(buf, blen, "VID=%02x,PID=%02x",
4877 +                        pci_get_vendor(sc->sc_pcidev),
4878 +                        pci_get_device(sc->sc_pcidev));
4879 +       } else {
4880 +               buf[0]='\0';
4881 +               strncat(buf, n, blen);
4882 +       }
4883 +       return buf;
4884 +}
4885 +
4886 +struct hipp_fs_entry {
4887 +       struct attribute attr;
4888 +       /* other stuff */
4889 +};
4890 +
4891 +
4892 +static ssize_t
4893 +cryptoid_show(struct device *dev,
4894 +             struct device_attribute *attr,
4895 +             char *buf)                                                
4896 +{                                                              
4897 +       struct hipp_softc *sc;                                  
4898 +
4899 +       sc = pci_get_drvdata(to_pci_dev (dev));
4900 +       return sprintf (buf, "%d\n", sc->sc_cid);
4901 +}
4902 +
4903 +struct device_attribute hipp_dev_cryptoid = __ATTR_RO(cryptoid);
4904 +
4905 +/*
4906 + * Attach an interface that successfully probed.
4907 + */
4908 +static int
4909 +hipp_probe(struct pci_dev *dev, const struct pci_device_id *ent)
4910 +{
4911 +       struct hipp_softc *sc = NULL;
4912 +       int i;
4913 +       //char rbase;
4914 +       //u_int16_t ena;
4915 +       int rev;
4916 +       //int rseg;
4917 +       int rc;
4918 +
4919 +       DPRINTF("%s()\n", __FUNCTION__);
4920 +
4921 +       if (pci_enable_device(dev) < 0)
4922 +               return(-ENODEV);
4923 +
4924 +       if (pci_set_mwi(dev))
4925 +               return(-ENODEV);
4926 +
4927 +       if (!dev->irq) {
4928 +               printk("hifn: found device with no IRQ assigned. check BIOS settings!\n");
4929 +               pci_disable_device(dev);
4930 +               return(-ENODEV);
4931 +       }
4932 +
4933 +       sc = (struct hipp_softc *) kmalloc(sizeof(*sc), GFP_KERNEL);
4934 +       if (!sc)
4935 +               return(-ENOMEM);
4936 +       memset(sc, 0, sizeof(*sc));
4937 +
4938 +       softc_device_init(sc, "hifn-hipp", hipp_num_chips, hipp_methods);
4939 +
4940 +       sc->sc_pcidev = dev;
4941 +       sc->sc_irq = -1;
4942 +       sc->sc_cid = -1;
4943 +       sc->sc_num = hipp_num_chips++;
4944 +
4945 +       if (sc->sc_num < HIPP_MAX_CHIPS)
4946 +               hipp_chip_idx[sc->sc_num] = sc;
4947 +
4948 +       pci_set_drvdata(sc->sc_pcidev, sc);
4949 +
4950 +       spin_lock_init(&sc->sc_mtx);
4951 +
4952 +       /*
4953 +        * Setup PCI resources.
4954 +        * The READ_REG_0, WRITE_REG_0, READ_REG_1,
4955 +        * and WRITE_REG_1 macros throughout the driver are used
4956 +        * to permit better debugging.
4957 +        */
4958 +       for(i=0; i<4; i++) {
4959 +               unsigned long mem_start, mem_len;
4960 +               mem_start = pci_resource_start(sc->sc_pcidev, i);
4961 +               mem_len   = pci_resource_len(sc->sc_pcidev, i);
4962 +               sc->sc_barphy[i] = (caddr_t)mem_start;
4963 +               sc->sc_bar[i] = (ocf_iomem_t) ioremap(mem_start, mem_len);
4964 +               if (!sc->sc_bar[i]) {
4965 +                       device_printf(sc->sc_dev, "cannot map bar%d register space\n", i);
4966 +                       goto fail;
4967 +               }
4968 +       }
4969 +
4970 +       //hipp_reset_board(sc, 0);
4971 +       pci_set_master(sc->sc_pcidev);
4972 +
4973 +       /*
4974 +        * Arrange the interrupt line.
4975 +        */
4976 +       rc = request_irq(dev->irq, hipp_intr, IRQF_SHARED, "hifn", sc);
4977 +       if (rc) {
4978 +               device_printf(sc->sc_dev, "could not map interrupt: %d\n", rc);
4979 +               goto fail;
4980 +       }
4981 +       sc->sc_irq = dev->irq;
4982 +
4983 +       rev = READ_REG_1(sc, HIPP_1_REVID) & 0xffff;
4984 +
4985 +       {
4986 +               char b[32];
4987 +               device_printf(sc->sc_dev, "%s, rev %u",
4988 +                             hipp_partname(sc, b, sizeof(b)), rev);
4989 +       }
4990 +
4991 +#if 0
4992 +       if (sc->sc_flags & HIFN_IS_7956)
4993 +               printf(", pll=0x%x<%s clk, %ux mult>",
4994 +                       sc->sc_pllconfig,
4995 +                       sc->sc_pllconfig & HIFN_PLL_REF_SEL ? "ext" : "pci",
4996 +                       2 + 2*((sc->sc_pllconfig & HIFN_PLL_ND) >> 11));
4997 +#endif
4998 +       printf("\n");
4999 +
5000 +       sc->sc_cid = crypto_get_driverid(softc_get_device(sc),CRYPTOCAP_F_HARDWARE);
5001 +       if (sc->sc_cid < 0) {
5002 +               device_printf(sc->sc_dev, "could not get crypto driver id\n");
5003 +               goto fail;
5004 +       }
5005 +
5006 +#if 0 /* cannot work with a non-GPL module */
5007 +       /* make a sysfs entry to let the world know what entry we got */
5008 +       sysfs_create_file(&sc->sc_pcidev->dev.kobj, &hipp_dev_cryptoid.attr);
5009 +#endif
5010 +
5011 +#if 0
5012 +       init_timer(&sc->sc_tickto);
5013 +       sc->sc_tickto.function = hifn_tick;
5014 +       sc->sc_tickto.data = (unsigned long) sc->sc_num;
5015 +       mod_timer(&sc->sc_tickto, jiffies + HZ);
5016 +#endif
5017 +
5018 +#if 0 /* no code here yet ?? */
5019 +       crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
5020 +#endif
5021 +
5022 +       return (0);
5023 +
5024 +fail:
5025 +       if (sc->sc_cid >= 0)
5026 +               crypto_unregister_all(sc->sc_cid);
5027 +       if (sc->sc_irq != -1)
5028 +               free_irq(sc->sc_irq, sc);
5029 +       
5030 +#if 0
5031 +       if (sc->sc_dma) {
5032 +               /* Turn off DMA polling */
5033 +               WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
5034 +                           HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
5035 +               
5036 +               pci_free_consistent(sc->sc_pcidev,
5037 +                                   sizeof(*sc->sc_dma),
5038 +                                   sc->sc_dma, sc->sc_dma_physaddr);
5039 +       }
5040 +#endif
5041 +       kfree(sc);
5042 +       return (-ENXIO);
5043 +}
5044 +
5045 +/*
5046 + * Detach an interface that successfully probed.
5047 + */
5048 +static void
5049 +hipp_remove(struct pci_dev *dev)
5050 +{
5051 +       struct hipp_softc *sc = pci_get_drvdata(dev);
5052 +       unsigned long l_flags;
5053 +
5054 +       DPRINTF("%s()\n", __FUNCTION__);
5055 +
5056 +       /* disable interrupts */
5057 +       HIPP_LOCK(sc);
5058 +
5059 +#if 0
5060 +       WRITE_REG_1(sc, HIFN_1_DMA_IER, 0);
5061 +       HIFN_UNLOCK(sc);
5062 +
5063 +       /*XXX other resources */
5064 +       del_timer_sync(&sc->sc_tickto);
5065 +
5066 +       /* Turn off DMA polling */
5067 +       WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
5068 +           HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
5069 +#endif
5070 +
5071 +       crypto_unregister_all(sc->sc_cid);
5072 +
5073 +       free_irq(sc->sc_irq, sc);
5074 +
5075 +#if 0
5076 +       pci_free_consistent(sc->sc_pcidev, sizeof(*sc->sc_dma),
5077 +                sc->sc_dma, sc->sc_dma_physaddr);
5078 +#endif
5079 +}
5080 +
5081 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
5082 +static irqreturn_t hipp_intr(int irq, void *arg)
5083 +#else
5084 +static irqreturn_t hipp_intr(int irq, void *arg, struct pt_regs *regs)
5085 +#endif
5086 +{
5087 +       struct hipp_softc *sc = arg;
5088 +
5089 +       sc = sc; /* shut up compiler */
5090 +
5091 +       return IRQ_HANDLED;
5092 +}
5093 +
5094 +static struct pci_device_id hipp_pci_tbl[] = {
5095 +       { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7855,
5096 +         PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
5097 +       { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_8155,
5098 +         PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
+       { 0, 0, 0, 0, 0, 0, }
5099 +};
5100 +MODULE_DEVICE_TABLE(pci, hipp_pci_tbl);
5101 +
5102 +static struct pci_driver hipp_driver = {
5103 +       .name         = "hipp",
5104 +       .id_table     = hipp_pci_tbl,
5105 +       .probe        = hipp_probe,
5106 +       .remove       = hipp_remove,
5107 +       /* add PM stuff here one day */
5108 +};
5109 +
5110 +static int __init hipp_init (void)
5111 +{
5112 +       struct hipp_softc *sc = NULL;
5113 +       int rc;
5114 +
5115 +       DPRINTF("%s(%p)\n", __FUNCTION__, hipp_init);
5116 +
5117 +       rc = pci_register_driver(&hipp_driver);
5118 +       pci_register_driver_compat(&hipp_driver, rc);
5119 +
5120 +       return rc;
5121 +}
5122 +
5123 +static void __exit hipp_exit (void)
5124 +{
5125 +       pci_unregister_driver(&hipp_driver);
5126 +}
5127 +
5128 +module_init(hipp_init);
5129 +module_exit(hipp_exit);
5130 +
5131 +MODULE_LICENSE("BSD");
5132 +MODULE_AUTHOR("Michael Richardson <mcr@xelerance.com>");
5133 +MODULE_DESCRIPTION("OCF driver for hifn HIPP-I/II PCI crypto devices");
5134 --- /dev/null   2007-07-04 13:54:27.000000000 +1000
5135 +++ linux/crypto/ocf/hifn/hifnHIPPreg.h 2007-07-25 10:11:22.000000000 +1000
5136 @@ -0,0 +1,46 @@
5137 +/*-
5138 + * Hifn HIPP-I/HIPP-II (7855/8155) driver.
5139 + * Copyright (c) 2006 Michael Richardson <mcr@xelerance.com>
5140 + *
5141 + * Redistribution and use in source and binary forms, with or without
5142 + * modification, are permitted provided that the following conditions
5143 + * are met:
5144 + *
5145 + * 1. Redistributions of source code must retain the above copyright
5146 + *    notice, this list of conditions and the following disclaimer.
5147 + * 2. Redistributions in binary form must reproduce the above copyright
5148 + *    notice, this list of conditions and the following disclaimer in the
5149 + *    documentation and/or other materials provided with the distribution.
5150 + * 3. The name of the author may not be used to endorse or promote products
5151 + *    derived from this software without specific prior written permission.
5152 + *
5153 + *
5154 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
5155 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
5156 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
5157 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
5158 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
5159 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
5160 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
5161 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
5162 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
5163 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5164 + *
5165 + * Effort sponsored by Hifn inc.
5166 + *
5167 + */
5168 +
5169 +#ifndef __HIFNHIPP_H__
5170 +#define        __HIFNHIPP_H__
5171 +
5172 +/*
5173 + * PCI vendor and device identifiers
5174 + */
5175 +#define        PCI_VENDOR_HIFN         0x13a3          /* Hifn */
5176 +#define        PCI_PRODUCT_HIFN_6500   0x0006          /* 6500 */
5177 +#define        PCI_PRODUCT_HIFN_7855   0x001f          /* 7855 */
5178 +#define        PCI_PRODUCT_HIFN_8155   0x999           /* XXX 8155 */
5179 +
5180 +#define HIPP_1_REVID            0x01 /* BOGUS */
5181 +
5182 +#endif /* __HIFNHIPP_H__ */
5183 --- /dev/null   2007-07-04 13:54:27.000000000 +1000
5184 +++ linux/crypto/ocf/hifn/hifnHIPPvar.h 2007-07-25 13:47:04.000000000 +1000
5185 @@ -0,0 +1,93 @@
5186 +/*
5187 + * Hifn HIPP-I/HIPP-II (7855/8155) driver.
5188 + * Copyright (c) 2006 Michael Richardson <mcr@xelerance.com>
5189 + *
5190 + * Redistribution and use in source and binary forms, with or without
5191 + * modification, are permitted provided that the following conditions
5192 + * are met:
5193 + *
5194 + * 1. Redistributions of source code must retain the above copyright
5195 + *    notice, this list of conditions and the following disclaimer.
5196 + * 2. Redistributions in binary form must reproduce the above copyright
5197 + *    notice, this list of conditions and the following disclaimer in the
5198 + *    documentation and/or other materials provided with the distribution.
5199 + * 3. The name of the author may not be used to endorse or promote products
5200 + *    derived from this software without specific prior written permission.
5201 + *
5202 + *
5203 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
5204 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
5205 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
5206 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
5207 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
5208 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
5209 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
5210 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
5211 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
5212 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5213 + *
5214 + * Effort sponsored by Hifn inc.
5215 + *
5216 + */
5217 +
5218 +#ifndef __HIFNHIPPVAR_H__
5219 +#define __HIFNHIPPVAR_H__
5220 +
5221 +#define HIPP_MAX_CHIPS 8
5222 +
5223 +/*
5224 + * Holds data specific to a single Hifn HIPP-I board.
5225 + */
5226 +struct hipp_softc {
5227 +       softc_device_decl                sc_dev;
5228 +
5229 +       struct pci_dev          *sc_pcidev;     /* device backpointer */
5230 +       ocf_iomem_t             sc_bar[5];
5231 +       caddr_t                 sc_barphy[5];   /* physical address */
5232 +       int                     sc_num;         /* for multiple devs */
5233 +       spinlock_t              sc_mtx;         /* per-instance lock */
5234 +       int32_t                 sc_cid;
5235 +       int                     sc_irq;
5236 +
5237 +#if 0
5238 +
5239 +       u_int32_t               sc_dmaier;
5240 +       u_int32_t               sc_drammodel;   /* 1=dram, 0=sram */
5241 +       u_int32_t               sc_pllconfig;   /* 7954/7955/7956 PLL config */
5242 +
5243 +       struct hifn_dma         *sc_dma;
5244 +       dma_addr_t              sc_dma_physaddr;/* physical address of sc_dma */
5245 +
5246 +       int                     sc_dmansegs;
5247 +       int                     sc_maxses;
5248 +       int                     sc_nsessions;
5249 +       struct hifn_session     *sc_sessions;
5250 +       int                     sc_ramsize;
5251 +       int                     sc_flags;
5252 +#define        HIFN_HAS_RNG            0x1     /* includes random number generator */
5253 +#define        HIFN_HAS_PUBLIC         0x2     /* includes public key support */
5254 +#define        HIFN_HAS_AES            0x4     /* includes AES support */
5255 +#define        HIFN_IS_7811            0x8     /* Hifn 7811 part */
5256 +#define        HIFN_IS_7956            0x10    /* Hifn 7956/7955 don't have SDRAM */
5257 +
5258 +       struct timer_list       sc_tickto;      /* for managing DMA */
5259 +
5260 +       int                     sc_rngfirst;
5261 +       int                     sc_rnghz;       /* RNG polling frequency */
5262 +
5263 +       int                     sc_c_busy;      /* command ring busy */
5264 +       int                     sc_s_busy;      /* source data ring busy */
5265 +       int                     sc_d_busy;      /* destination data ring busy */
5266 +       int                     sc_r_busy;      /* result ring busy */
5267 +       int                     sc_active;      /* for initial countdown */
5268 +       int                     sc_needwakeup;  /* ops q'd waiting on resources */
5269 +       int                     sc_curbatch;    /* # ops submitted w/o int */
5270 +       int                     sc_suspended;
5271 +       struct miscdevice       sc_miscdev;
5272 +#endif
5273 +};
5274 +
5275 +#define        HIPP_LOCK(_sc)          spin_lock_irqsave(&(_sc)->sc_mtx, l_flags)
5276 +#define        HIPP_UNLOCK(_sc)        spin_unlock_irqrestore(&(_sc)->sc_mtx, l_flags)
5277 +
5278 +#endif /* __HIFNHIPPVAR_H__ */
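HIPP_LOCK()/HIPP_UNLOCK() expand to spin_lock_irqsave()/spin_unlock_irqrestore() on a local variable that must be named l_flags, which is why the driver routines above declare "unsigned long l_flags;". A minimal usage sketch (illustrative only, not part of the patch):

    static void example_locked_op(struct hipp_softc *sc)
    {
            unsigned long l_flags;          /* the name is required by the macros */

            HIPP_LOCK(sc);
            /* touch fields guarded by sc_mtx here */
            HIPP_UNLOCK(sc);
    }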
5279 --- /dev/null   2007-07-04 13:54:27.000000000 +1000
5280 +++ linux/crypto/ocf/safe/md5.c 2005-05-20 10:30:52.000000000 +1000
5281 @@ -0,0 +1,308 @@
5282 +/*     $KAME: md5.c,v 1.5 2000/11/08 06:13:08 itojun Exp $     */
5283 +/*
5284 + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
5285 + * All rights reserved.
5286 + *
5287 + * Redistribution and use in source and binary forms, with or without
5288 + * modification, are permitted provided that the following conditions
5289 + * are met:
5290 + * 1. Redistributions of source code must retain the above copyright
5291 + *    notice, this list of conditions and the following disclaimer.
5292 + * 2. Redistributions in binary form must reproduce the above copyright
5293 + *    notice, this list of conditions and the following disclaimer in the
5294 + *    documentation and/or other materials provided with the distribution.
5295 + * 3. Neither the name of the project nor the names of its contributors
5296 + *    may be used to endorse or promote products derived from this software
5297 + *    without specific prior written permission.
5298 + *
5299 + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
5300 + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
5301 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
5302 + * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
5303 + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
5304 + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
5305 + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
5306 + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
5307 + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
5308 + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
5309 + * SUCH DAMAGE.
5310 + */
5311 +
5312 +#if 0
5313 +#include <sys/cdefs.h>
5314 +__FBSDID("$FreeBSD: src/sys/crypto/md5.c,v 1.9 2004/01/27 19:49:19 des Exp $");
5315 +
5316 +#include <sys/types.h>
5317 +#include <sys/cdefs.h>
5318 +#include <sys/time.h>
5319 +#include <sys/systm.h>
5320 +#include <crypto/md5.h>
5321 +#endif
5322 +
5323 +#define SHIFT(X, s) (((X) << (s)) | ((X) >> (32 - (s))))
5324 +
5325 +#define F(X, Y, Z) (((X) & (Y)) | ((~X) & (Z)))
5326 +#define G(X, Y, Z) (((X) & (Z)) | ((Y) & (~Z)))
5327 +#define H(X, Y, Z) ((X) ^ (Y) ^ (Z))
5328 +#define I(X, Y, Z) ((Y) ^ ((X) | (~Z)))
5329 +
5330 +#define ROUND1(a, b, c, d, k, s, i) { \
5331 +       (a) = (a) + F((b), (c), (d)) + X[(k)] + T[(i)]; \
5332 +       (a) = SHIFT((a), (s)); \
5333 +       (a) = (b) + (a); \
5334 +}
5335 +
5336 +#define ROUND2(a, b, c, d, k, s, i) { \
5337 +       (a) = (a) + G((b), (c), (d)) + X[(k)] + T[(i)]; \
5338 +       (a) = SHIFT((a), (s)); \
5339 +       (a) = (b) + (a); \
5340 +}
5341 +
5342 +#define ROUND3(a, b, c, d, k, s, i) { \
5343 +       (a) = (a) + H((b), (c), (d)) + X[(k)] + T[(i)]; \
5344 +       (a) = SHIFT((a), (s)); \
5345 +       (a) = (b) + (a); \
5346 +}
5347 +
5348 +#define ROUND4(a, b, c, d, k, s, i) { \
5349 +       (a) = (a) + I((b), (c), (d)) + X[(k)] + T[(i)]; \
5350 +       (a) = SHIFT((a), (s)); \
5351 +       (a) = (b) + (a); \
5352 +}
5353 +
5354 +#define Sa      7
5355 +#define Sb     12
5356 +#define Sc     17
5357 +#define Sd     22
5358 +
5359 +#define Se      5
5360 +#define Sf      9
5361 +#define Sg     14
5362 +#define Sh     20
5363 +
5364 +#define Si      4
5365 +#define Sj     11
5366 +#define Sk     16
5367 +#define Sl     23
5368 +
5369 +#define Sm      6
5370 +#define Sn     10
5371 +#define So     15
5372 +#define Sp     21
5373 +
5374 +#define MD5_A0 0x67452301
5375 +#define MD5_B0 0xefcdab89
5376 +#define MD5_C0 0x98badcfe
5377 +#define MD5_D0 0x10325476
5378 +
5379 +/* Integer part of 4294967296 times abs(sin(i)), where i is in radians. */
5380 +static const u_int32_t T[65] = {
5381 +       0,
5382 +       0xd76aa478,     0xe8c7b756,     0x242070db,     0xc1bdceee,
5383 +       0xf57c0faf,     0x4787c62a,     0xa8304613,     0xfd469501,
5384 +       0x698098d8,     0x8b44f7af,     0xffff5bb1,     0x895cd7be,
5385 +       0x6b901122,     0xfd987193,     0xa679438e,     0x49b40821,
5386 +
5387 +       0xf61e2562,     0xc040b340,     0x265e5a51,     0xe9b6c7aa,
5388 +       0xd62f105d,     0x2441453,      0xd8a1e681,     0xe7d3fbc8,
5389 +       0x21e1cde6,     0xc33707d6,     0xf4d50d87,     0x455a14ed,
5390 +       0xa9e3e905,     0xfcefa3f8,     0x676f02d9,     0x8d2a4c8a,
5391 +
5392 +       0xfffa3942,     0x8771f681,     0x6d9d6122,     0xfde5380c,
5393 +       0xa4beea44,     0x4bdecfa9,     0xf6bb4b60,     0xbebfbc70,
5394 +       0x289b7ec6,     0xeaa127fa,     0xd4ef3085,     0x4881d05,
5395 +       0xd9d4d039,     0xe6db99e5,     0x1fa27cf8,     0xc4ac5665,
5396 +
5397 +       0xf4292244,     0x432aff97,     0xab9423a7,     0xfc93a039,
5398 +       0x655b59c3,     0x8f0ccc92,     0xffeff47d,     0x85845dd1,
5399 +       0x6fa87e4f,     0xfe2ce6e0,     0xa3014314,     0x4e0811a1,
5400 +       0xf7537e82,     0xbd3af235,     0x2ad7d2bb,     0xeb86d391,
5401 +};
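+
+/*
+ * A minimal host-side sketch (not part of the driver) for regenerating or
+ * auditing the table above; it assumes a hosted C environment with libm:
+ *
+ *     #include <math.h>
+ *     #include <stdio.h>
+ *
+ *     int main(void)
+ *     {
+ *             int i;
+ *             for (i = 1; i <= 64; i++)
+ *                     printf("0x%08lx,\n", (unsigned long)
+ *                             (fabs(sin((double)i)) * 4294967296.0));
+ *             return 0;
+ *     }
+ */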
5402 +
5403 +static const u_int8_t md5_paddat[MD5_BUFLEN] = {
5404 +       0x80,   0,      0,      0,      0,      0,      0,      0,
5405 +       0,      0,      0,      0,      0,      0,      0,      0,
5406 +       0,      0,      0,      0,      0,      0,      0,      0,
5407 +       0,      0,      0,      0,      0,      0,      0,      0,
5408 +       0,      0,      0,      0,      0,      0,      0,      0,
5409 +       0,      0,      0,      0,      0,      0,      0,      0,
5410 +       0,      0,      0,      0,      0,      0,      0,      0,
5411 +       0,      0,      0,      0,      0,      0,      0,      0,      
5412 +};
5413 +
5414 +static void md5_calc(u_int8_t *, md5_ctxt *);
5415 +
5416 +void md5_init(ctxt)
5417 +       md5_ctxt *ctxt;
5418 +{
5419 +       ctxt->md5_n = 0;
5420 +       ctxt->md5_i = 0;
5421 +       ctxt->md5_sta = MD5_A0;
5422 +       ctxt->md5_stb = MD5_B0;
5423 +       ctxt->md5_stc = MD5_C0;
5424 +       ctxt->md5_std = MD5_D0;
5425 +       bzero(ctxt->md5_buf, sizeof(ctxt->md5_buf));
5426 +}
5427 +
5428 +void md5_loop(ctxt, input, len)
5429 +       md5_ctxt *ctxt;
5430 +       u_int8_t *input;
5431 +       u_int len; /* number of bytes */
5432 +{
5433 +       u_int gap, i;
5434 +
5435 +       ctxt->md5_n += len * 8; /* byte to bit */
5436 +       gap = MD5_BUFLEN - ctxt->md5_i;
5437 +
5438 +       if (len >= gap) {
5439 +               bcopy((void *)input, (void *)(ctxt->md5_buf + ctxt->md5_i),
5440 +                       gap);
5441 +               md5_calc(ctxt->md5_buf, ctxt);
5442 +
5443 +               for (i = gap; i + MD5_BUFLEN <= len; i += MD5_BUFLEN) {
5444 +                       md5_calc((u_int8_t *)(input + i), ctxt);
5445 +               }
5446 +               
5447 +               ctxt->md5_i = len - i;
5448 +               bcopy((void *)(input + i), (void *)ctxt->md5_buf, ctxt->md5_i);
5449 +       } else {
5450 +               bcopy((void *)input, (void *)(ctxt->md5_buf + ctxt->md5_i),
5451 +                       len);
5452 +               ctxt->md5_i += len;
5453 +       }
5454 +}
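+
+/*
+ * For example, an md5_loop() call with a 100 byte input on a fresh context
+ * (md5_i == 0) consumes one full 64 byte block via md5_calc() and leaves
+ * the remaining 36 bytes buffered in md5_buf with md5_i == 36; the next
+ * md5_loop() or md5_pad() call picks up from there.
+ */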
5455 +
5456 +void md5_pad(ctxt)
5457 +       md5_ctxt *ctxt;
5458 +{
5459 +       u_int gap;
5460 +
5461 +       /* Don't count up padding. Keep md5_n. */       
5462 +       gap = MD5_BUFLEN - ctxt->md5_i;
5463 +       if (gap > 8) {
5464 +               bcopy(md5_paddat,
5465 +                     (void *)(ctxt->md5_buf + ctxt->md5_i),
5466 +                     gap - sizeof(ctxt->md5_n));
5467 +       } else {
5468 +               /* including gap == 8 */
5469 +               bcopy(md5_paddat, (void *)(ctxt->md5_buf + ctxt->md5_i),
5470 +                       gap);
5471 +               md5_calc(ctxt->md5_buf, ctxt);
5472 +               bcopy((md5_paddat + gap),
5473 +                     (void *)ctxt->md5_buf,
5474 +                     MD5_BUFLEN - sizeof(ctxt->md5_n));
5475 +       }
5476 +
5477 +       /* 8 byte word */       
5478 +#if BYTE_ORDER == LITTLE_ENDIAN
5479 +       bcopy(&ctxt->md5_n8[0], &ctxt->md5_buf[56], 8);
5480 +#endif
5481 +#if BYTE_ORDER == BIG_ENDIAN
5482 +       ctxt->md5_buf[56] = ctxt->md5_n8[7];
5483 +       ctxt->md5_buf[57] = ctxt->md5_n8[6];
5484 +       ctxt->md5_buf[58] = ctxt->md5_n8[5];
5485 +       ctxt->md5_buf[59] = ctxt->md5_n8[4];
5486 +       ctxt->md5_buf[60] = ctxt->md5_n8[3];
5487 +       ctxt->md5_buf[61] = ctxt->md5_n8[2];
5488 +       ctxt->md5_buf[62] = ctxt->md5_n8[1];
5489 +       ctxt->md5_buf[63] = ctxt->md5_n8[0];
5490 +#endif
5491 +
5492 +       md5_calc(ctxt->md5_buf, ctxt);
5493 +}
5494 +
5495 +void md5_result(digest, ctxt)
5496 +       u_int8_t *digest;
5497 +       md5_ctxt *ctxt;
5498 +{
5499 +       /* 4 byte words */
5500 +#if BYTE_ORDER == LITTLE_ENDIAN
5501 +       bcopy(&ctxt->md5_st8[0], digest, 16);
5502 +#endif
5503 +#if BYTE_ORDER == BIG_ENDIAN
5504 +       digest[ 0] = ctxt->md5_st8[ 3]; digest[ 1] = ctxt->md5_st8[ 2];
5505 +       digest[ 2] = ctxt->md5_st8[ 1]; digest[ 3] = ctxt->md5_st8[ 0];
5506 +       digest[ 4] = ctxt->md5_st8[ 7]; digest[ 5] = ctxt->md5_st8[ 6];
5507 +       digest[ 6] = ctxt->md5_st8[ 5]; digest[ 7] = ctxt->md5_st8[ 4];
5508 +       digest[ 8] = ctxt->md5_st8[11]; digest[ 9] = ctxt->md5_st8[10];
5509 +       digest[10] = ctxt->md5_st8[ 9]; digest[11] = ctxt->md5_st8[ 8];
5510 +       digest[12] = ctxt->md5_st8[15]; digest[13] = ctxt->md5_st8[14];
5511 +       digest[14] = ctxt->md5_st8[13]; digest[15] = ctxt->md5_st8[12];
5512 +#endif
5513 +}
5514 +
5515 +static void md5_calc(b64, ctxt)
5516 +       u_int8_t *b64;
5517 +       md5_ctxt *ctxt;
5518 +{
5519 +       u_int32_t A = ctxt->md5_sta;
5520 +       u_int32_t B = ctxt->md5_stb;
5521 +       u_int32_t C = ctxt->md5_stc;
5522 +       u_int32_t D = ctxt->md5_std;
5523 +#if BYTE_ORDER == LITTLE_ENDIAN
5524 +       u_int32_t *X = (u_int32_t *)b64;
5525 +#endif 
5526 +#if BYTE_ORDER == BIG_ENDIAN
5527 +       /* 4 byte words */
5528 +       /* brute force, but fast! */
5529 +       u_int32_t X[16];
5530 +       u_int8_t *y = (u_int8_t *)X;
5531 +       y[ 0] = b64[ 3]; y[ 1] = b64[ 2]; y[ 2] = b64[ 1]; y[ 3] = b64[ 0];
5532 +       y[ 4] = b64[ 7]; y[ 5] = b64[ 6]; y[ 6] = b64[ 5]; y[ 7] = b64[ 4];
5533 +       y[ 8] = b64[11]; y[ 9] = b64[10]; y[10] = b64[ 9]; y[11] = b64[ 8];
5534 +       y[12] = b64[15]; y[13] = b64[14]; y[14] = b64[13]; y[15] = b64[12];
5535 +       y[16] = b64[19]; y[17] = b64[18]; y[18] = b64[17]; y[19] = b64[16];
5536 +       y[20] = b64[23]; y[21] = b64[22]; y[22] = b64[21]; y[23] = b64[20];
5537 +       y[24] = b64[27]; y[25] = b64[26]; y[26] = b64[25]; y[27] = b64[24];
5538 +       y[28] = b64[31]; y[29] = b64[30]; y[30] = b64[29]; y[31] = b64[28];
5539 +       y[32] = b64[35]; y[33] = b64[34]; y[34] = b64[33]; y[35] = b64[32];
5540 +       y[36] = b64[39]; y[37] = b64[38]; y[38] = b64[37]; y[39] = b64[36];
5541 +       y[40] = b64[43]; y[41] = b64[42]; y[42] = b64[41]; y[43] = b64[40];
5542 +       y[44] = b64[47]; y[45] = b64[46]; y[46] = b64[45]; y[47] = b64[44];
5543 +       y[48] = b64[51]; y[49] = b64[50]; y[50] = b64[49]; y[51] = b64[48];
5544 +       y[52] = b64[55]; y[53] = b64[54]; y[54] = b64[53]; y[55] = b64[52];
5545 +       y[56] = b64[59]; y[57] = b64[58]; y[58] = b64[57]; y[59] = b64[56];
5546 +       y[60] = b64[63]; y[61] = b64[62]; y[62] = b64[61]; y[63] = b64[60];
5547 +#endif
5548 +
5549 +       ROUND1(A, B, C, D,  0, Sa,  1); ROUND1(D, A, B, C,  1, Sb,  2);
5550 +       ROUND1(C, D, A, B,  2, Sc,  3); ROUND1(B, C, D, A,  3, Sd,  4);
5551 +       ROUND1(A, B, C, D,  4, Sa,  5); ROUND1(D, A, B, C,  5, Sb,  6);
5552 +       ROUND1(C, D, A, B,  6, Sc,  7); ROUND1(B, C, D, A,  7, Sd,  8);
5553 +       ROUND1(A, B, C, D,  8, Sa,  9); ROUND1(D, A, B, C,  9, Sb, 10);
5554 +       ROUND1(C, D, A, B, 10, Sc, 11); ROUND1(B, C, D, A, 11, Sd, 12);
5555 +       ROUND1(A, B, C, D, 12, Sa, 13); ROUND1(D, A, B, C, 13, Sb, 14);
5556 +       ROUND1(C, D, A, B, 14, Sc, 15); ROUND1(B, C, D, A, 15, Sd, 16);
5557 +       
5558 +       ROUND2(A, B, C, D,  1, Se, 17); ROUND2(D, A, B, C,  6, Sf, 18);
5559 +       ROUND2(C, D, A, B, 11, Sg, 19); ROUND2(B, C, D, A,  0, Sh, 20);
5560 +       ROUND2(A, B, C, D,  5, Se, 21); ROUND2(D, A, B, C, 10, Sf, 22);
5561 +       ROUND2(C, D, A, B, 15, Sg, 23); ROUND2(B, C, D, A,  4, Sh, 24);
5562 +       ROUND2(A, B, C, D,  9, Se, 25); ROUND2(D, A, B, C, 14, Sf, 26);
5563 +       ROUND2(C, D, A, B,  3, Sg, 27); ROUND2(B, C, D, A,  8, Sh, 28);
5564 +       ROUND2(A, B, C, D, 13, Se, 29); ROUND2(D, A, B, C,  2, Sf, 30);
5565 +       ROUND2(C, D, A, B,  7, Sg, 31); ROUND2(B, C, D, A, 12, Sh, 32);
5566 +
5567 +       ROUND3(A, B, C, D,  5, Si, 33); ROUND3(D, A, B, C,  8, Sj, 34);
5568 +       ROUND3(C, D, A, B, 11, Sk, 35); ROUND3(B, C, D, A, 14, Sl, 36);
5569 +       ROUND3(A, B, C, D,  1, Si, 37); ROUND3(D, A, B, C,  4, Sj, 38);
5570 +       ROUND3(C, D, A, B,  7, Sk, 39); ROUND3(B, C, D, A, 10, Sl, 40);
5571 +       ROUND3(A, B, C, D, 13, Si, 41); ROUND3(D, A, B, C,  0, Sj, 42);
5572 +       ROUND3(C, D, A, B,  3, Sk, 43); ROUND3(B, C, D, A,  6, Sl, 44);
5573 +       ROUND3(A, B, C, D,  9, Si, 45); ROUND3(D, A, B, C, 12, Sj, 46);
5574 +       ROUND3(C, D, A, B, 15, Sk, 47); ROUND3(B, C, D, A,  2, Sl, 48);
5575 +       
5576 +       ROUND4(A, B, C, D,  0, Sm, 49); ROUND4(D, A, B, C,  7, Sn, 50); 
5577 +       ROUND4(C, D, A, B, 14, So, 51); ROUND4(B, C, D, A,  5, Sp, 52); 
5578 +       ROUND4(A, B, C, D, 12, Sm, 53); ROUND4(D, A, B, C,  3, Sn, 54); 
5579 +       ROUND4(C, D, A, B, 10, So, 55); ROUND4(B, C, D, A,  1, Sp, 56); 
5580 +       ROUND4(A, B, C, D,  8, Sm, 57); ROUND4(D, A, B, C, 15, Sn, 58); 
5581 +       ROUND4(C, D, A, B,  6, So, 59); ROUND4(B, C, D, A, 13, Sp, 60); 
5582 +       ROUND4(A, B, C, D,  4, Sm, 61); ROUND4(D, A, B, C, 11, Sn, 62); 
5583 +       ROUND4(C, D, A, B,  2, So, 63); ROUND4(B, C, D, A,  9, Sp, 64);
5584 +
5585 +       ctxt->md5_sta += A;
5586 +       ctxt->md5_stb += B;
5587 +       ctxt->md5_stc += C;
5588 +       ctxt->md5_std += D;
5589 +}
5590 --- /dev/null   2007-07-04 13:54:27.000000000 +1000
5591 +++ linux/crypto/ocf/safe/md5.h 2005-05-20 10:30:52.000000000 +1000
5592 @@ -0,0 +1,76 @@
5593 +/*     $FreeBSD: src/sys/crypto/md5.h,v 1.4 2002/03/20 05:13:50 alfred Exp $   */
5594 +/*     $KAME: md5.h,v 1.4 2000/03/27 04:36:22 sumikawa Exp $   */
5595 +
5596 +/*
5597 + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
5598 + * All rights reserved.
5599 + *
5600 + * Redistribution and use in source and binary forms, with or without
5601 + * modification, are permitted provided that the following conditions
5602 + * are met:
5603 + * 1. Redistributions of source code must retain the above copyright
5604 + *    notice, this list of conditions and the following disclaimer.
5605 + * 2. Redistributions in binary form must reproduce the above copyright
5606 + *    notice, this list of conditions and the following disclaimer in the
5607 + *    documentation and/or other materials provided with the distribution.
5608 + * 3. Neither the name of the project nor the names of its contributors
5609 + *    may be used to endorse or promote products derived from this software
5610 + *    without specific prior written permission.
5611 + *
5612 + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
5613 + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
5614 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
5615 + * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
5616 + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
5617 + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
5618 + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
5619 + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
5620 + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
5621 + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
5622 + * SUCH DAMAGE.
5623 + */
5624 +
5625 +#ifndef _NETINET6_MD5_H_
5626 +#define _NETINET6_MD5_H_
5627 +
5628 +#define MD5_BUFLEN     64
5629 +
5630 +typedef struct {
5631 +       union {
5632 +               u_int32_t       md5_state32[4];
5633 +               u_int8_t        md5_state8[16];
5634 +       } md5_st;
5635 +
5636 +#define md5_sta                md5_st.md5_state32[0]
5637 +#define md5_stb                md5_st.md5_state32[1]
5638 +#define md5_stc                md5_st.md5_state32[2]
5639 +#define md5_std                md5_st.md5_state32[3]
5640 +#define md5_st8                md5_st.md5_state8
5641 +
5642 +       union {
5643 +               u_int64_t       md5_count64;
5644 +               u_int8_t        md5_count8[8];
5645 +       } md5_count;
5646 +#define md5_n  md5_count.md5_count64
5647 +#define md5_n8 md5_count.md5_count8
5648 +
5649 +       u_int   md5_i;
5650 +       u_int8_t        md5_buf[MD5_BUFLEN];
5651 +} md5_ctxt;
5652 +
5653 +extern void md5_init(md5_ctxt *);
5654 +extern void md5_loop(md5_ctxt *, u_int8_t *, u_int);
5655 +extern void md5_pad(md5_ctxt *);
5656 +extern void md5_result(u_int8_t *, md5_ctxt *);
5657 +
5658 +/* compatibility */
5659 +#define MD5_CTX                md5_ctxt
5660 +#define MD5Init(x)     md5_init((x))
5661 +#define MD5Update(x, y, z)     md5_loop((x), (y), (z))
5662 +#define MD5Final(x, y) \
5663 +do {                           \
5664 +       md5_pad((y));           \
5665 +       md5_result((x), (y));   \
5666 +} while (0)
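+
+/*
+ * Typical use of the compatibility interface (a sketch; data and datalen
+ * stand in for a caller-supplied buffer and its length in bytes):
+ *
+ *     MD5_CTX ctx;
+ *     u_int8_t digest[16];
+ *
+ *     MD5Init(&ctx);
+ *     MD5Update(&ctx, data, datalen);
+ *     MD5Final(digest, &ctx);
+ */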
5667 +
5668 +#endif /* ! _NETINET6_MD5_H_*/
5669 --- /dev/null   2007-07-04 13:54:27.000000000 +1000
5670 +++ linux/crypto/ocf/safe/safe.c        2007-07-27 11:34:59.000000000 +1000
5671 @@ -0,0 +1,2288 @@
5672 +/*-
5673 + * Linux port done by David McCullough <david_mccullough@securecomputing.com>
5674 + * Copyright (C) 2004-2007 David McCullough
5675 + * The license and original author are listed below.
5676 + *
5677 + * Copyright (c) 2003 Sam Leffler, Errno Consulting
5678 + * Copyright (c) 2003 Global Technology Associates, Inc.
5679 + * All rights reserved.
5680 + *
5681 + * Redistribution and use in source and binary forms, with or without
5682 + * modification, are permitted provided that the following conditions
5683 + * are met:
5684 + * 1. Redistributions of source code must retain the above copyright
5685 + *    notice, this list of conditions and the following disclaimer.
5686 + * 2. Redistributions in binary form must reproduce the above copyright
5687 + *    notice, this list of conditions and the following disclaimer in the
5688 + *    documentation and/or other materials provided with the distribution.
5689 + *
5690 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
5691 + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
5692 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
5693 + * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
5694 + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
5695 + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
5696 + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
5697 + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
5698 + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
5699 + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
5700 + * SUCH DAMAGE.
5701 + *
5702 +__FBSDID("$FreeBSD: src/sys/dev/safe/safe.c,v 1.18 2007/03/21 03:42:50 sam Exp $");
5703 + */
5704 +
5705 +#ifndef AUTOCONF_INCLUDED
5706 +#include <linux/config.h>
5707 +#endif
5708 +#include <linux/module.h>
5709 +#include <linux/kernel.h>
5710 +#include <linux/init.h>
5711 +#include <linux/list.h>
5712 +#include <linux/slab.h>
5713 +#include <linux/wait.h>
5714 +#include <linux/sched.h>
5715 +#include <linux/pci.h>
5716 +#include <linux/delay.h>
5717 +#include <linux/interrupt.h>
5718 +#include <linux/spinlock.h>
5719 +#include <linux/random.h>
5720 +#include <linux/version.h>
5721 +#include <linux/skbuff.h>
5722 +#include <asm/io.h>
5723 +
5724 +/*
5725 + * SafeNet SafeXcel-1141 hardware crypto accelerator
5726 + */
5727 +
5728 +#include <cryptodev.h>
5729 +#include <uio.h>
5730 +#include <safe/safereg.h>
5731 +#include <safe/safevar.h>
5732 +
5733 +#if 1
5734 +#define        DPRINTF(a)      do { \
5735 +                                               if (debug) { \
5736 +                                                       printk("%s: ", sc ? \
5737 +                                                               device_get_nameunit(sc->sc_dev) : "safe"); \
5738 +                                                       printk a; \
5739 +                                               } \
5740 +                                       } while (0)
5741 +#else
5742 +#define        DPRINTF(a)
5743 +#endif
5744 +
5745 +/*
5746 + * until we find a cleaner way, include the BSD md5/sha1 code
5747 + * here
5748 + */
5749 +#define HMAC_HACK 1
5750 +#ifdef HMAC_HACK
5751 +#define LITTLE_ENDIAN 1234
5752 +#define BIG_ENDIAN 4321
5753 +#ifdef __LITTLE_ENDIAN
5754 +#define BYTE_ORDER LITTLE_ENDIAN
5755 +#endif
5756 +#ifdef __BIG_ENDIAN
5757 +#define BYTE_ORDER BIG_ENDIAN
5758 +#endif
5759 +#include <safe/md5.h>
5760 +#include <safe/md5.c>
5761 +#include <safe/sha1.h>
5762 +#include <safe/sha1.c>
5763 +
5764 +u_int8_t hmac_ipad_buffer[64] = {
5765 +    0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
5766 +    0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
5767 +    0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
5768 +    0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
5769 +    0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
5770 +    0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
5771 +    0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
5772 +    0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36
5773 +};
5774 +
5775 +u_int8_t hmac_opad_buffer[64] = {
5776 +    0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
5777 +    0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
5778 +    0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
5779 +    0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
5780 +    0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
5781 +    0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
5782 +    0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
5783 +    0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C
5784 +};
5785 +#endif /* HMAC_HACK */
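+
+/*
+ * The pads above are the standard HMAC constants from RFC 2104.  Roughly,
+ * safe_setup_mackey() below precomputes the digest states of (key ^ ipad)
+ * and (key ^ opad), which the engine then uses to finish
+ *
+ *     HMAC(key, data) = H((key ^ opad) || H((key ^ ipad) || data))
+ *
+ * over the request data.
+ */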
5786 +
5787 +/* add proc entry for this */
5788 +struct safe_stats safestats;
5789 +
5790 +#define debug safe_debug
5791 +int safe_debug = 0;
5792 +module_param(safe_debug, int, 0644);
5793 +MODULE_PARM_DESC(safe_debug, "Enable debug");
5794 +
5795 +static void safe_callback(struct safe_softc *, struct safe_ringentry *);
5796 +static void safe_feed(struct safe_softc *, struct safe_ringentry *);
5797 +#if defined(CONFIG_OCF_RANDOMHARVEST) && !defined(SAFE_NO_RNG)
5798 +static void safe_rng_init(struct safe_softc *);
5799 +int safe_rngbufsize = 8;               /* 32 bytes each read  */
5800 +module_param(safe_rngbufsize, int, 0644);
5801 +MODULE_PARM_DESC(safe_rngbufsize, "RNG polling buffer size (32-bit words)");
5802 +int safe_rngmaxalarm = 8;              /* max alarms before reset */
5803 +module_param(safe_rngmaxalarm, int, 0644);
5804 +MODULE_PARM_DESC(safe_rngmaxalarm, "RNG max alarms before reset");
5805 +#endif /* SAFE_NO_RNG */
5806 +
5807 +static void safe_totalreset(struct safe_softc *sc);
5808 +static int safe_dmamap_aligned(struct safe_softc *sc, const struct safe_operand *op);
5809 +static int safe_dmamap_uniform(struct safe_softc *sc, const struct safe_operand *op);
5810 +static int safe_free_entry(struct safe_softc *sc, struct safe_ringentry *re);
5811 +static int safe_kprocess(device_t dev, struct cryptkop *krp, int hint);
5812 +static int safe_kstart(struct safe_softc *sc);
5813 +static int safe_ksigbits(struct safe_softc *sc, struct crparam *cr);
5814 +static void safe_kfeed(struct safe_softc *sc);
5815 +static void safe_kpoll(unsigned long arg);
5816 +static void safe_kload_reg(struct safe_softc *sc, u_int32_t off,
5817 +                                                               u_int32_t len, struct crparam *n);
5818 +
5819 +static int safe_newsession(device_t, u_int32_t *, struct cryptoini *);
5820 +static int safe_freesession(device_t, u_int64_t);
5821 +static int safe_process(device_t, struct cryptop *, int);
5822 +
5823 +static device_method_t safe_methods = {
5824 +       /* crypto device methods */
5825 +       DEVMETHOD(cryptodev_newsession, safe_newsession),
5826 +       DEVMETHOD(cryptodev_freesession,safe_freesession),
5827 +       DEVMETHOD(cryptodev_process,    safe_process),
5828 +       DEVMETHOD(cryptodev_kprocess,   safe_kprocess),
5829 +};
5830 +
5831 +#define        READ_REG(sc,r)                  readl((sc)->sc_base_addr + (r))
5832 +#define WRITE_REG(sc,r,val)            writel((val), (sc)->sc_base_addr + (r))
5833 +
5834 +#define SAFE_MAX_CHIPS 8
5835 +static struct safe_softc *safe_chip_idx[SAFE_MAX_CHIPS];
5836 +
5837 +/*
5838 + * split our buffers up into DMA-safe byte fragments to work around a
5839 + * lockup bug in rev 1.0 of the 1141 hardware.
5840 + */
5841 +
5842 +static int
5843 +pci_map_linear(
5844 +       struct safe_softc *sc,
5845 +       struct safe_operand *buf,
5846 +       void *addr,
5847 +       int len)
5848 +{
5849 +       dma_addr_t tmp;
5850 +       int chunk, tlen = len;
5851 +
5852 +       tmp = pci_map_single(sc->sc_pcidev, addr, len, PCI_DMA_BIDIRECTIONAL);
5853 +
5854 +       buf->mapsize += len;
5855 +       while (len > 0) {
5856 +               chunk = (len > sc->sc_max_dsize) ? sc->sc_max_dsize : len;
5857 +               buf->segs[buf->nsegs].ds_addr = tmp;
5858 +               buf->segs[buf->nsegs].ds_len  = chunk;
5859 +               buf->segs[buf->nsegs].ds_tlen = tlen;
5860 +               buf->nsegs++;
5861 +               tmp  += chunk;
5862 +               len  -= chunk;
5863 +               tlen = 0;
5864 +       }
5865 +       return 0;
5866 +}
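+
+/*
+ * For example (sizes are illustrative only): with sc_max_dsize of 2048, a
+ * 5000 byte buffer becomes one pci_map_single() mapping split across three
+ * segments of 2048, 2048 and 904 bytes.  Only the first segment records the
+ * total length in ds_tlen, which is what pci_unmap_operand() later uses to
+ * undo the mapping.
+ */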
5867 +
5868 +/*
5869 + * map in a given uio buffer (great on some arches :-)
5870 + */
5871 +
5872 +static int
5873 +pci_map_uio(struct safe_softc *sc, struct safe_operand *buf, struct uio *uio)
5874 +{
5875 +       struct iovec *iov = uio->uio_iov;
5876 +       int n;
5877 +
5878 +       DPRINTF(("%s()\n", __FUNCTION__));
5879 +
5880 +       buf->mapsize = 0;
5881 +       buf->nsegs = 0;
5882 +
5883 +       for (n = 0; n < uio->uio_iovcnt; n++) {
5884 +               pci_map_linear(sc, buf, iov->iov_base, iov->iov_len);
5885 +               iov++;
5886 +       }
5887 +
5888 +       /* identify this buffer by the first segment */
5889 +       buf->map = (void *) buf->segs[0].ds_addr;
5890 +       return(0);
5891 +}
5892 +
5893 +/*
5894 + * map in a given sk_buff
5895 + */
5896 +
5897 +static int
5898 +pci_map_skb(struct safe_softc *sc,struct safe_operand *buf,struct sk_buff *skb)
5899 +{
5900 +       int i;
5901 +
5902 +       DPRINTF(("%s()\n", __FUNCTION__));
5903 +
5904 +       buf->mapsize = 0;
5905 +       buf->nsegs = 0;
5906 +
5907 +       pci_map_linear(sc, buf, skb->data, skb_headlen(skb));
5908 +
5909 +       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5910 +               pci_map_linear(sc, buf,
5911 +                               page_address(skb_shinfo(skb)->frags[i].page) +
5912 +                                                       skb_shinfo(skb)->frags[i].page_offset,
5913 +                               skb_shinfo(skb)->frags[i].size);
5914 +       }
5915 +
5916 +       /* identify this buffer by the first segment */
5917 +       buf->map = (void *) buf->segs[0].ds_addr;
5918 +       return(0);
5919 +}
5920 +
5921 +
5922 +#if 0 /* not needed at this time */
5923 +static void
5924 +pci_sync_operand(struct safe_softc *sc, struct safe_operand *buf)
5925 +{
5926 +       int i;
5927 +
5928 +       DPRINTF(("%s()\n", __FUNCTION__));
5929 +       for (i = 0; i < buf->nsegs; i++)
5930 +               pci_dma_sync_single_for_cpu(sc->sc_pcidev, buf->segs[i].ds_addr,
5931 +                               buf->segs[i].ds_len, PCI_DMA_BIDIRECTIONAL);
5932 +}
5933 +#endif
5934 +
5935 +static void
5936 +pci_unmap_operand(struct safe_softc *sc, struct safe_operand *buf)
5937 +{
5938 +       int i;
5939 +       DPRINTF(("%s()\n", __FUNCTION__));
5940 +       for (i = 0; i < buf->nsegs; i++) {
5941 +               if (buf->segs[i].ds_tlen) {
5942 +                       DPRINTF(("%s - unmap %d 0x%x %d\n", __FUNCTION__, i, buf->segs[i].ds_addr, buf->segs[i].ds_tlen));
5943 +                       pci_unmap_single(sc->sc_pcidev, buf->segs[i].ds_addr,
5944 +                                       buf->segs[i].ds_tlen, PCI_DMA_BIDIRECTIONAL);
5945 +                       DPRINTF(("%s - unmap %d 0x%x %d done\n", __FUNCTION__, i, buf->segs[i].ds_addr, buf->segs[i].ds_tlen));
5946 +               }
5947 +               buf->segs[i].ds_addr = 0;
5948 +               buf->segs[i].ds_len = 0;
5949 +               buf->segs[i].ds_tlen = 0;
5950 +       }
5951 +       buf->nsegs = 0;
5952 +       buf->mapsize = 0;
5953 +       buf->map = 0;
5954 +}
5955 +
5956 +
5957 +/*
5958 + * SafeXcel Interrupt routine
5959 + */
5960 +static irqreturn_t
5961 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
5962 +safe_intr(int irq, void *arg)
5963 +#else
5964 +safe_intr(int irq, void *arg, struct pt_regs *regs)
5965 +#endif
5966 +{
5967 +       struct safe_softc *sc = arg;
5968 +       int stat;
5969 +       unsigned long flags;
5970 +
5971 +       stat = READ_REG(sc, SAFE_HM_STAT);
5972 +
5973 +       DPRINTF(("%s(stat=0x%x)\n", __FUNCTION__, stat));
5974 +
5975 +       if (stat == 0)          /* shared irq, not for us */
5976 +               return IRQ_NONE;
5977 +
5978 +       WRITE_REG(sc, SAFE_HI_CLR, stat);       /* IACK */
5979 +
5980 +       if ((stat & SAFE_INT_PE_DDONE)) {
5981 +               /*
5982 +                * Descriptor(s) done; scan the ring and
5983 +                * process completed operations.
5984 +                */
5985 +               spin_lock_irqsave(&sc->sc_ringmtx, flags);
5986 +               while (sc->sc_back != sc->sc_front) {
5987 +                       struct safe_ringentry *re = sc->sc_back;
5988 +
5989 +#ifdef SAFE_DEBUG
5990 +                       if (debug) {
5991 +                               safe_dump_ringstate(sc, __func__);
5992 +                               safe_dump_request(sc, __func__, re);
5993 +                       }
5994 +#endif
5995 +                       /*
5996 +                        * safe_process marks ring entries that were allocated
5997 +                        * but not used with a csr of zero.  This ensures the
5998 +                        * ring front pointer never needs to be set backwards
5999 +                        * in the event that an entry is allocated but not used
6000 +                        * because of a setup error.
6001 +                        */
6002 +                       DPRINTF(("%s re->re_desc.d_csr=0x%x\n", __FUNCTION__, re->re_desc.d_csr));
6003 +                       if (re->re_desc.d_csr != 0) {
6004 +                               if (!SAFE_PE_CSR_IS_DONE(re->re_desc.d_csr)) {
6005 +                                       DPRINTF(("%s !CSR_IS_DONE\n", __FUNCTION__));
6006 +                                       break;
6007 +                               }
6008 +                               if (!SAFE_PE_LEN_IS_DONE(re->re_desc.d_len)) {
6009 +                                       DPRINTF(("%s !LEN_IS_DONE\n", __FUNCTION__));
6010 +                                       break;
6011 +                               }
6012 +                               sc->sc_nqchip--;
6013 +                               safe_callback(sc, re);
6014 +                       }
6015 +                       if (++(sc->sc_back) == sc->sc_ringtop)
6016 +                               sc->sc_back = sc->sc_ring;
6017 +               }
6018 +               spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
6019 +       }
6020 +
6021 +       /*
6022 +        * Check to see if we got any DMA Error
6023 +        */
6024 +       if (stat & SAFE_INT_PE_ERROR) {
6025 +               printk("%s: dmaerr dmastat %08x\n", device_get_nameunit(sc->sc_dev),
6026 +                               (int)READ_REG(sc, SAFE_PE_DMASTAT));
6027 +               safestats.st_dmaerr++;
6028 +               safe_totalreset(sc);
6029 +#if 0
6030 +               safe_feed(sc);
6031 +#endif
6032 +       }
6033 +
6034 +       if (sc->sc_needwakeup) {                /* XXX check high watermark */
6035 +               int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ);
6036 +               DPRINTF(("%s: wakeup crypto %x\n", __func__,
6037 +                       sc->sc_needwakeup));
6038 +               sc->sc_needwakeup &= ~wakeup;
6039 +               crypto_unblock(sc->sc_cid, wakeup);
6040 +       }
6041 +       
6042 +       return IRQ_HANDLED;
6043 +}
6044 +
6045 +/*
6046 + * safe_feed() - post a request to chip
6047 + */
6048 +static void
6049 +safe_feed(struct safe_softc *sc, struct safe_ringentry *re)
6050 +{
6051 +       DPRINTF(("%s()\n", __FUNCTION__));
6052 +#ifdef SAFE_DEBUG
6053 +       if (debug) {
6054 +               safe_dump_ringstate(sc, __func__);
6055 +               safe_dump_request(sc, __func__, re);
6056 +       }
6057 +#endif
6058 +       sc->sc_nqchip++;
6059 +       if (sc->sc_nqchip > safestats.st_maxqchip)
6060 +               safestats.st_maxqchip = sc->sc_nqchip;
6061 +       /* poke h/w to check descriptor ring, any value can be written */
6062 +       WRITE_REG(sc, SAFE_HI_RD_DESCR, 0);
6063 +}
6064 +
6065 +#define        N(a)    (sizeof(a) / sizeof (a[0]))
6066 +static void
6067 +safe_setup_enckey(struct safe_session *ses, caddr_t key)
6068 +{
6069 +       int i;
6070 +
6071 +       bcopy(key, ses->ses_key, ses->ses_klen / 8);
6072 +
6073 +       /* PE is little-endian, ensure proper byte order */
6074 +       for (i = 0; i < N(ses->ses_key); i++)
6075 +               ses->ses_key[i] = htole32(ses->ses_key[i]);
6076 +}
6077 +
6078 +static void
6079 +safe_setup_mackey(struct safe_session *ses, int algo, caddr_t key, int klen)
6080 +{
6081 +#ifdef HMAC_HACK
6082 +       MD5_CTX md5ctx;
6083 +       SHA1_CTX sha1ctx;
6084 +       int i;
6085 +
6086 +
6087 +       for (i = 0; i < klen; i++)
6088 +               key[i] ^= HMAC_IPAD_VAL;
6089 +
6090 +       if (algo == CRYPTO_MD5_HMAC) {
6091 +               MD5Init(&md5ctx);
6092 +               MD5Update(&md5ctx, key, klen);
6093 +               MD5Update(&md5ctx, hmac_ipad_buffer, MD5_HMAC_BLOCK_LEN - klen);
6094 +               bcopy(md5ctx.md5_st8, ses->ses_hminner, sizeof(md5ctx.md5_st8));
6095 +       } else {
6096 +               SHA1Init(&sha1ctx);
6097 +               SHA1Update(&sha1ctx, key, klen);
6098 +               SHA1Update(&sha1ctx, hmac_ipad_buffer,
6099 +                   SHA1_HMAC_BLOCK_LEN - klen);
6100 +               bcopy(sha1ctx.h.b32, ses->ses_hminner, sizeof(sha1ctx.h.b32));
6101 +       }
6102 +
6103 +       for (i = 0; i < klen; i++)
6104 +               key[i] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);
6105 +
6106 +       if (algo == CRYPTO_MD5_HMAC) {
6107 +               MD5Init(&md5ctx);
6108 +               MD5Update(&md5ctx, key, klen);
6109 +               MD5Update(&md5ctx, hmac_opad_buffer, MD5_HMAC_BLOCK_LEN - klen);
6110 +               bcopy(md5ctx.md5_st8, ses->ses_hmouter, sizeof(md5ctx.md5_st8));
6111 +       } else {
6112 +               SHA1Init(&sha1ctx);
6113 +               SHA1Update(&sha1ctx, key, klen);
6114 +               SHA1Update(&sha1ctx, hmac_opad_buffer,
6115 +                   SHA1_HMAC_BLOCK_LEN - klen);
6116 +               bcopy(sha1ctx.h.b32, ses->ses_hmouter, sizeof(sha1ctx.h.b32));
6117 +       }
6118 +
6119 +       for (i = 0; i < klen; i++)
6120 +               key[i] ^= HMAC_OPAD_VAL;
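+
+       /*
+        * NB: the three XOR passes leave the caller's key unchanged:
+        * key ^ IPAD, then ^ (IPAD ^ OPAD) yields key ^ OPAD, and the
+        * final ^ OPAD above restores the original key.
+        */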
6121 +
6122 +#if 0
6123 +       /*
6124 +        * this code prevents SHA from working on a big-endian host,
6125 +        * so it is obviously wrong.  The byte-swap setup we do with
6126 +        * the chip appears to handle this for us.
6127 +        */
6128 +
6129 +       /* PE is little-endian, ensure proper byte order */
6130 +       for (i = 0; i < N(ses->ses_hminner); i++) {
6131 +               ses->ses_hminner[i] = htole32(ses->ses_hminner[i]);
6132 +               ses->ses_hmouter[i] = htole32(ses->ses_hmouter[i]);
6133 +       }
6134 +#endif
6135 +#else /* HMAC_HACK */
6136 +       printk("safe: md5/sha not implemented\n");
6137 +#endif /* HMAC_HACK */
6138 +}
6139 +#undef N
6140 +
6141 +/*
6142 + * Allocate a new 'session' and return an encoded session id.  'sidp'
6143 + * contains our registration id, and should contain an encoded session
6144 + * id on successful allocation.
6145 + */
6146 +static int
6147 +safe_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
6148 +{
6149 +       struct safe_softc *sc = device_get_softc(dev);
6150 +       struct cryptoini *c, *encini = NULL, *macini = NULL;
6151 +       struct safe_session *ses = NULL;
6152 +       int sesn;
6153 +
6154 +       DPRINTF(("%s()\n", __FUNCTION__));
6155 +
6156 +       if (sidp == NULL || cri == NULL || sc == NULL)
6157 +               return (EINVAL);
6158 +
6159 +       for (c = cri; c != NULL; c = c->cri_next) {
6160 +               if (c->cri_alg == CRYPTO_MD5_HMAC ||
6161 +                   c->cri_alg == CRYPTO_SHA1_HMAC ||
6162 +                   c->cri_alg == CRYPTO_NULL_HMAC) {
6163 +                       if (macini)
6164 +                               return (EINVAL);
6165 +                       macini = c;
6166 +               } else if (c->cri_alg == CRYPTO_DES_CBC ||
6167 +                   c->cri_alg == CRYPTO_3DES_CBC ||
6168 +                   c->cri_alg == CRYPTO_AES_CBC ||
6169 +                   c->cri_alg == CRYPTO_NULL_CBC) {
6170 +                       if (encini)
6171 +                               return (EINVAL);
6172 +                       encini = c;
6173 +               } else
6174 +                       return (EINVAL);
6175 +       }
6176 +       if (encini == NULL && macini == NULL)
6177 +               return (EINVAL);
6178 +       if (encini) {                   /* validate key length */
6179 +               switch (encini->cri_alg) {
6180 +               case CRYPTO_DES_CBC:
6181 +                       if (encini->cri_klen != 64)
6182 +                               return (EINVAL);
6183 +                       break;
6184 +               case CRYPTO_3DES_CBC:
6185 +                       if (encini->cri_klen != 192)
6186 +                               return (EINVAL);
6187 +                       break;
6188 +               case CRYPTO_AES_CBC:
6189 +                       if (encini->cri_klen != 128 &&
6190 +                           encini->cri_klen != 192 &&
6191 +                           encini->cri_klen != 256)
6192 +                               return (EINVAL);
6193 +                       break;
6194 +               }
6195 +       }
6196 +
6197 +       if (sc->sc_sessions == NULL) {
6198 +               ses = sc->sc_sessions = (struct safe_session *)
6199 +                       kmalloc(sizeof(struct safe_session), SLAB_ATOMIC);
6200 +               if (ses == NULL)
6201 +                       return (ENOMEM);
6202 +               memset(ses, 0, sizeof(struct safe_session));
6203 +               sesn = 0;
6204 +               sc->sc_nsessions = 1;
6205 +       } else {
6206 +               for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
6207 +                       if (sc->sc_sessions[sesn].ses_used == 0) {
6208 +                               ses = &sc->sc_sessions[sesn];
6209 +                               break;
6210 +                       }
6211 +               }
6212 +
6213 +               if (ses == NULL) {
6214 +                       sesn = sc->sc_nsessions;
6215 +                       ses = (struct safe_session *)
6216 +                               kmalloc((sesn + 1) * sizeof(struct safe_session), SLAB_ATOMIC);
6217 +                       if (ses == NULL)
6218 +                               return (ENOMEM);
6219 +                       memset(ses, 0, (sesn + 1) * sizeof(struct safe_session));
6220 +                       bcopy(sc->sc_sessions, ses, sesn *
6221 +                           sizeof(struct safe_session));
6222 +                       bzero(sc->sc_sessions, sesn *
6223 +                           sizeof(struct safe_session));
6224 +                       kfree(sc->sc_sessions);
6225 +                       sc->sc_sessions = ses;
6226 +                       ses = &sc->sc_sessions[sesn];
6227 +                       sc->sc_nsessions++;
6228 +               }
6229 +       }
6230 +
6231 +       bzero(ses, sizeof(struct safe_session));
6232 +       ses->ses_used = 1;
6233 +
6234 +       if (encini) {
6235 +               /* get an IV */
6236 +               /* XXX may read fewer than requested */
6237 +               read_random(ses->ses_iv, sizeof(ses->ses_iv));
6238 +
6239 +               ses->ses_klen = encini->cri_klen;
6240 +               if (encini->cri_key != NULL)
6241 +                       safe_setup_enckey(ses, encini->cri_key);
6242 +       }
6243 +
6244 +       if (macini) {
6245 +               ses->ses_mlen = macini->cri_mlen;
6246 +               if (ses->ses_mlen == 0) {
6247 +                       if (macini->cri_alg == CRYPTO_MD5_HMAC)
6248 +                               ses->ses_mlen = MD5_HASH_LEN;
6249 +                       else
6250 +                               ses->ses_mlen = SHA1_HASH_LEN;
6251 +               }
6252 +
6253 +               if (macini->cri_key != NULL) {
6254 +                       safe_setup_mackey(ses, macini->cri_alg, macini->cri_key,
6255 +                           macini->cri_klen / 8);
6256 +               }
6257 +       }
6258 +
6259 +       *sidp = SAFE_SID(device_get_unit(sc->sc_dev), sesn);
6260 +       return (0);
6261 +}
6262 +
6263 +/*
6264 + * Deallocate a session.
6265 + */
6266 +static int
6267 +safe_freesession(device_t dev, u_int64_t tid)
6268 +{
6269 +       struct safe_softc *sc = device_get_softc(dev);
6270 +       int session, ret;
6271 +       u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;
6272 +
6273 +       DPRINTF(("%s()\n", __FUNCTION__));
6274 +
6275 +       if (sc == NULL)
6276 +               return (EINVAL);
6277 +
6278 +       session = SAFE_SESSION(sid);
6279 +       if (session < sc->sc_nsessions) {
6280 +               bzero(&sc->sc_sessions[session], sizeof(sc->sc_sessions[session]));
6281 +               ret = 0;
6282 +       } else
6283 +               ret = EINVAL;
6284 +       return (ret);
6285 +}
6286 +
6287 +
6288 +static int
6289 +safe_process(device_t dev, struct cryptop *crp, int hint)
6290 +{
6291 +       struct safe_softc *sc = device_get_softc(dev);
6292 +       int err = 0, i, nicealign, uniform;
6293 +       struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
6294 +       int bypass, oplen, ivsize;
6295 +       caddr_t iv;
6296 +       int16_t coffset;
6297 +       struct safe_session *ses;
6298 +       struct safe_ringentry *re;
6299 +       struct safe_sarec *sa;
6300 +       struct safe_pdesc *pd;
6301 +       u_int32_t cmd0, cmd1, staterec;
6302 +       unsigned long flags;
6303 +
6304 +       DPRINTF(("%s()\n", __FUNCTION__));
6305 +
6306 +       if (crp == NULL || crp->crp_callback == NULL || sc == NULL) {
6307 +               safestats.st_invalid++;
6308 +               return (EINVAL);
6309 +       }
6310 +       if (SAFE_SESSION(crp->crp_sid) >= sc->sc_nsessions) {
6311 +               safestats.st_badsession++;
6312 +               return (EINVAL);
6313 +       }
6314 +
6315 +       spin_lock_irqsave(&sc->sc_ringmtx, flags);
6316 +       if (sc->sc_front == sc->sc_back && sc->sc_nqchip != 0) {
6317 +               safestats.st_ringfull++;
6318 +               sc->sc_needwakeup |= CRYPTO_SYMQ;
6319 +               spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
6320 +               return (ERESTART);
6321 +       }
6322 +       re = sc->sc_front;
6323 +
6324 +       staterec = re->re_sa.sa_staterec;       /* save */
6325 +       /* NB: zero everything but the PE descriptor */
6326 +       bzero(&re->re_sa, sizeof(struct safe_ringentry) - sizeof(re->re_desc));
6327 +       re->re_sa.sa_staterec = staterec;       /* restore */
6328 +
6329 +       re->re_crp = crp;
6330 +       re->re_sesn = SAFE_SESSION(crp->crp_sid);
6331 +
6332 +       re->re_src.nsegs = 0;
6333 +       re->re_dst.nsegs = 0;
6334 +
6335 +       if (crp->crp_flags & CRYPTO_F_SKBUF) {
6336 +               re->re_src_skb = (struct sk_buff *)crp->crp_buf;
6337 +               re->re_dst_skb = (struct sk_buff *)crp->crp_buf;
6338 +       } else if (crp->crp_flags & CRYPTO_F_IOV) {
6339 +               re->re_src_io = (struct uio *)crp->crp_buf;
6340 +               re->re_dst_io = (struct uio *)crp->crp_buf;
6341 +       } else {
6342 +               safestats.st_badflags++;
6343 +               err = EINVAL;
6344 +               goto errout;    /* XXX we don't handle contiguous blocks! */
6345 +       }
6346 +
6347 +       sa = &re->re_sa;
6348 +       ses = &sc->sc_sessions[re->re_sesn];
6349 +
6350 +       crd1 = crp->crp_desc;
6351 +       if (crd1 == NULL) {
6352 +               safestats.st_nodesc++;
6353 +               err = EINVAL;
6354 +               goto errout;
6355 +       }
6356 +       crd2 = crd1->crd_next;
6357 +
6358 +       cmd0 = SAFE_SA_CMD0_BASIC;              /* basic group operation */
6359 +       cmd1 = 0;
6360 +       if (crd2 == NULL) {
6361 +               if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
6362 +                   crd1->crd_alg == CRYPTO_SHA1_HMAC ||
6363 +                   crd1->crd_alg == CRYPTO_NULL_HMAC) {
6364 +                       maccrd = crd1;
6365 +                       enccrd = NULL;
6366 +                       cmd0 |= SAFE_SA_CMD0_OP_HASH;
6367 +               } else if (crd1->crd_alg == CRYPTO_DES_CBC ||
6368 +                   crd1->crd_alg == CRYPTO_3DES_CBC ||
6369 +                   crd1->crd_alg == CRYPTO_AES_CBC ||
6370 +                   crd1->crd_alg == CRYPTO_NULL_CBC) {
6371 +                       maccrd = NULL;
6372 +                       enccrd = crd1;
6373 +                       cmd0 |= SAFE_SA_CMD0_OP_CRYPT;
6374 +               } else {
6375 +                       safestats.st_badalg++;
6376 +                       err = EINVAL;
6377 +                       goto errout;
6378 +               }
6379 +       } else {
6380 +               if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
6381 +                   crd1->crd_alg == CRYPTO_SHA1_HMAC ||
6382 +                   crd1->crd_alg == CRYPTO_NULL_HMAC) &&
6383 +                   (crd2->crd_alg == CRYPTO_DES_CBC ||
6384 +                       crd2->crd_alg == CRYPTO_3DES_CBC ||
6385 +                       crd2->crd_alg == CRYPTO_AES_CBC ||
6386 +                       crd2->crd_alg == CRYPTO_NULL_CBC) &&
6387 +                   ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
6388 +                       maccrd = crd1;
6389 +                       enccrd = crd2;
6390 +               } else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
6391 +                   crd1->crd_alg == CRYPTO_3DES_CBC ||
6392 +                   crd1->crd_alg == CRYPTO_AES_CBC ||
6393 +                   crd1->crd_alg == CRYPTO_NULL_CBC) &&
6394 +                   (crd2->crd_alg == CRYPTO_MD5_HMAC ||
6395 +                       crd2->crd_alg == CRYPTO_SHA1_HMAC ||
6396 +                       crd2->crd_alg == CRYPTO_NULL_HMAC) &&
6397 +                   (crd1->crd_flags & CRD_F_ENCRYPT)) {
6398 +                       enccrd = crd1;
6399 +                       maccrd = crd2;
6400 +               } else {
6401 +                       safestats.st_badalg++;
6402 +                       err = EINVAL;
6403 +                       goto errout;
6404 +               }
6405 +               cmd0 |= SAFE_SA_CMD0_OP_BOTH;
6406 +       }
6407 +
6408 +       if (enccrd) {
6409 +               if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT)
6410 +                       safe_setup_enckey(ses, enccrd->crd_key);
6411 +
6412 +               if (enccrd->crd_alg == CRYPTO_DES_CBC) {
6413 +                       cmd0 |= SAFE_SA_CMD0_DES;
6414 +                       cmd1 |= SAFE_SA_CMD1_CBC;
6415 +                       ivsize = 2*sizeof(u_int32_t);
6416 +               } else if (enccrd->crd_alg == CRYPTO_3DES_CBC) {
6417 +                       cmd0 |= SAFE_SA_CMD0_3DES;
6418 +                       cmd1 |= SAFE_SA_CMD1_CBC;
6419 +                       ivsize = 2*sizeof(u_int32_t);
6420 +               } else if (enccrd->crd_alg == CRYPTO_AES_CBC) {
6421 +                       cmd0 |= SAFE_SA_CMD0_AES;
6422 +                       cmd1 |= SAFE_SA_CMD1_CBC;
6423 +                       if (ses->ses_klen == 128)
6424 +                            cmd1 |=  SAFE_SA_CMD1_AES128;
6425 +                       else if (ses->ses_klen == 192)
6426 +                            cmd1 |=  SAFE_SA_CMD1_AES192;
6427 +                       else
6428 +                            cmd1 |=  SAFE_SA_CMD1_AES256;
6429 +                       ivsize = 4*sizeof(u_int32_t);
6430 +               } else {
6431 +                       cmd0 |= SAFE_SA_CMD0_CRYPT_NULL;
6432 +                       ivsize = 0;
6433 +               }
6434 +
6435 +               /*
6436 +                * Setup encrypt/decrypt state.  When using basic ops
6437 +                * we can't use an inline IV because hash/crypt offset
6438 +                * must be from the end of the IV to the start of the
6439 +                * crypt data and this leaves out the preceding header
6440 +                * from the hash calculation.  Instead we place the IV
6441 +                * in the state record and set the hash/crypt offset to
6442 +                * copy both the header+IV.
6443 +                */
6444 +               if (enccrd->crd_flags & CRD_F_ENCRYPT) {
6445 +                       cmd0 |= SAFE_SA_CMD0_OUTBOUND;
6446 +
6447 +                       if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
6448 +                               iv = enccrd->crd_iv;
6449 +                       else
6450 +                               iv = (caddr_t) ses->ses_iv;
6451 +                       if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) {
6452 +                               crypto_copyback(crp->crp_flags, crp->crp_buf,
6453 +                                   enccrd->crd_inject, ivsize, iv);
6454 +                       }
6455 +                       bcopy(iv, re->re_sastate.sa_saved_iv, ivsize);
6456 +                       /* make iv LE */
6457 +                       for (i = 0; i < ivsize/sizeof(re->re_sastate.sa_saved_iv[0]); i++)
6458 +                               re->re_sastate.sa_saved_iv[i] =
6459 +                                       cpu_to_le32(re->re_sastate.sa_saved_iv[i]);
6460 +                       cmd0 |= SAFE_SA_CMD0_IVLD_STATE | SAFE_SA_CMD0_SAVEIV;
6461 +                       re->re_flags |= SAFE_QFLAGS_COPYOUTIV;
6462 +               } else {
6463 +                       cmd0 |= SAFE_SA_CMD0_INBOUND;
6464 +
6465 +                       if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) {
6466 +                               bcopy(enccrd->crd_iv,
6467 +                                       re->re_sastate.sa_saved_iv, ivsize);
6468 +                       } else {
6469 +                               crypto_copydata(crp->crp_flags, crp->crp_buf,
6470 +                                   enccrd->crd_inject, ivsize,
6471 +                                   (caddr_t)re->re_sastate.sa_saved_iv);
6472 +                       }
6473 +                       /* make iv LE */
6474 +                       for (i = 0; i < ivsize/sizeof(re->re_sastate.sa_saved_iv[0]); i++)
6475 +                               re->re_sastate.sa_saved_iv[i] =
6476 +                                       cpu_to_le32(re->re_sastate.sa_saved_iv[i]);
6477 +                       cmd0 |= SAFE_SA_CMD0_IVLD_STATE;
6478 +               }
6479 +               /*
6480 +                * For basic encryption use the zero pad algorithm.
6481 +                * This pads results to an 8-byte boundary and
6482 +                * suppresses padding verification for inbound (i.e.
6483 +                * decrypt) operations.
6484 +                *
6485 +                * NB: Not sure if the 8-byte pad boundary is a problem.
6486 +                */
6487 +               cmd0 |= SAFE_SA_CMD0_PAD_ZERO;
6488 +
6489 +               /* XXX assert key bufs have the same size */
6490 +               bcopy(ses->ses_key, sa->sa_key, sizeof(sa->sa_key));
6491 +       }
6492 +
6493 +       if (maccrd) {
6494 +               if (maccrd->crd_flags & CRD_F_KEY_EXPLICIT) {
6495 +                       safe_setup_mackey(ses, maccrd->crd_alg,
6496 +                           maccrd->crd_key, maccrd->crd_klen / 8);
6497 +               }
6498 +
6499 +               if (maccrd->crd_alg == CRYPTO_MD5_HMAC) {
6500 +                       cmd0 |= SAFE_SA_CMD0_MD5;
6501 +                       cmd1 |= SAFE_SA_CMD1_HMAC;      /* NB: enable HMAC */
6502 +               } else if (maccrd->crd_alg == CRYPTO_SHA1_HMAC) {
6503 +                       cmd0 |= SAFE_SA_CMD0_SHA1;
6504 +                       cmd1 |= SAFE_SA_CMD1_HMAC;      /* NB: enable HMAC */
6505 +               } else {
6506 +                       cmd0 |= SAFE_SA_CMD0_HASH_NULL;
6507 +               }
6508 +               /*
6509 +                * Digest data is loaded from the SA and the hash
6510 +                * result is saved to the state block where we
6511 +                * retrieve it for return to the caller.
6512 +                */
6513 +               /* XXX assert digest bufs have the same size */
6514 +               bcopy(ses->ses_hminner, sa->sa_indigest,
6515 +                       sizeof(sa->sa_indigest));
6516 +               bcopy(ses->ses_hmouter, sa->sa_outdigest,
6517 +                       sizeof(sa->sa_outdigest));
6518 +
6519 +               cmd0 |= SAFE_SA_CMD0_HSLD_SA | SAFE_SA_CMD0_SAVEHASH;
6520 +               re->re_flags |= SAFE_QFLAGS_COPYOUTICV;
6521 +       }
6522 +
6523 +       if (enccrd && maccrd) {
6524 +               /*
6525 +                * The offset from hash data to the start of
6526 +                * crypt data is the difference in the skips.
6527 +                */
6528 +               bypass = maccrd->crd_skip;
6529 +               coffset = enccrd->crd_skip - maccrd->crd_skip;
6530 +               if (coffset < 0) {
6531 +                       DPRINTF(("%s: hash does not precede crypt; "
6532 +                               "mac skip %u enc skip %u\n",
6533 +                               __func__, maccrd->crd_skip, enccrd->crd_skip));
6534 +                       safestats.st_skipmismatch++;
6535 +                       err = EINVAL;
6536 +                       goto errout;
6537 +               }
6538 +               oplen = enccrd->crd_skip + enccrd->crd_len;
6539 +               if (maccrd->crd_skip + maccrd->crd_len != oplen) {
6540 +                       DPRINTF(("%s: hash amount %u != crypt amount %u\n",
6541 +                               __func__, maccrd->crd_skip + maccrd->crd_len,
6542 +                               oplen));
6543 +                       safestats.st_lenmismatch++;
6544 +                       err = EINVAL;
6545 +                       goto errout;
6546 +               }
6547 +#ifdef SAFE_DEBUG
6548 +               if (debug) {
6549 +                       printf("mac: skip %d, len %d, inject %d\n",
6550 +                           maccrd->crd_skip, maccrd->crd_len,
6551 +                           maccrd->crd_inject);
6552 +                       printf("enc: skip %d, len %d, inject %d\n",
6553 +                           enccrd->crd_skip, enccrd->crd_len,
6554 +                           enccrd->crd_inject);
6555 +                       printf("bypass %d coffset %d oplen %d\n",
6556 +                               bypass, coffset, oplen);
6557 +               }
6558 +#endif
6559 +               if (coffset & 3) {      /* offset must be 32-bit aligned */
6560 +                       DPRINTF(("%s: coffset %u misaligned\n",
6561 +                               __func__, coffset));
6562 +                       safestats.st_coffmisaligned++;
6563 +                       err = EINVAL;
6564 +                       goto errout;
6565 +               }
6566 +               coffset >>= 2;
6567 +               if (coffset > 255) {    /* offset must be <256 dwords */
6568 +                       DPRINTF(("%s: coffset %u too big\n",
6569 +                               __func__, coffset));
6570 +                       safestats.st_cofftoobig++;
6571 +                       err = EINVAL;
6572 +                       goto errout;
6573 +               }
6574 +               /*
6575 +                * Tell the hardware to copy the header to the output.
6576 +                * The header is defined as the data from the end of
6577 +                * the bypass to the start of data to be encrypted. 
6578 +                * Typically this is the inline IV.  Note that you need
6579 +                * to do this even if src+dst are the same; it appears
6580 +                * that without this bit the encrypted data is written
6581 +                * immediately after the bypass data.
6582 +                */
6583 +               cmd1 |= SAFE_SA_CMD1_HDRCOPY;
6584 +               /*
6585 +                * Disable IP header mutable bit handling.  This is
6586 +                * needed to get correct HMAC calculations.
6587 +                */
6588 +               cmd1 |= SAFE_SA_CMD1_MUTABLE;
6589 +       } else {
6590 +               if (enccrd) {
6591 +                       bypass = enccrd->crd_skip;
6592 +                       oplen = bypass + enccrd->crd_len;
6593 +               } else {
6594 +                       bypass = maccrd->crd_skip;
6595 +                       oplen = bypass + maccrd->crd_len;
6596 +               }
6597 +               coffset = 0;
6598 +       }
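+       /*
+        * NB: at this point bypass is the number of bytes the engine
+        * skips at the front of the operand, oplen is the offset (from
+        * the start of the buffer) at which processing ends, and coffset
+        * is the hash-to-crypt offset in 32-bit words (zero for
+        * single-operation requests).
+        */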
6599 +       /* XXX verify multiple of 4 when using s/g */
6600 +       if (bypass > 96) {              /* bypass offset must be <= 96 bytes */
6601 +               DPRINTF(("%s: bypass %u too big\n", __func__, bypass));
6602 +               safestats.st_bypasstoobig++;
6603 +               err = EINVAL;
6604 +               goto errout;
6605 +       }
6606 +
6607 +       if (crp->crp_flags & CRYPTO_F_SKBUF) {
6608 +               if (pci_map_skb(sc, &re->re_src, re->re_src_skb)) {
6609 +                       safestats.st_noload++;
6610 +                       err = ENOMEM;
6611 +                       goto errout;
6612 +               }
6613 +       } else if (crp->crp_flags & CRYPTO_F_IOV) {
6614 +               if (pci_map_uio(sc, &re->re_src, re->re_src_io)) {
6615 +                       safestats.st_noload++;
6616 +                       err = ENOMEM;
6617 +                       goto errout;
6618 +               }
6619 +       }
6620 +       nicealign = safe_dmamap_aligned(sc, &re->re_src);
6621 +       uniform = safe_dmamap_uniform(sc, &re->re_src);
6622 +
6623 +       DPRINTF(("src nicealign %u uniform %u nsegs %u\n",
6624 +               nicealign, uniform, re->re_src.nsegs));
6625 +       if (re->re_src.nsegs > 1) {
6626 +               re->re_desc.d_src = sc->sc_spalloc.dma_paddr +
6627 +                       ((caddr_t) sc->sc_spfree - (caddr_t) sc->sc_spring);
6628 +               for (i = 0; i < re->re_src_nsegs; i++) {
6629 +                       /* NB: no need to check if there's space */
6630 +                       pd = sc->sc_spfree;
6631 +                       if (++(sc->sc_spfree) == sc->sc_springtop)
6632 +                               sc->sc_spfree = sc->sc_spring;
6633 +
6634 +                       KASSERT((pd->pd_flags&3) == 0 ||
6635 +                               (pd->pd_flags&3) == SAFE_PD_DONE,
6636 +                               ("bogus source particle descriptor; flags %x",
6637 +                               pd->pd_flags));
6638 +                       pd->pd_addr = re->re_src_segs[i].ds_addr;
6639 +                       pd->pd_size = re->re_src_segs[i].ds_len;
6640 +                       pd->pd_flags = SAFE_PD_READY;
6641 +               }
6642 +               cmd0 |= SAFE_SA_CMD0_IGATHER;
6643 +       } else {
6644 +               /*
6645 +                * No need for gather, reference the operand directly.
6646 +                */
6647 +               re->re_desc.d_src = re->re_src_segs[0].ds_addr;
6648 +       }
6649 +
6650 +       if (enccrd == NULL && maccrd != NULL) {
6651 +               /*
6652 +                * Hash op; no destination needed.
6653 +                */
6654 +       } else {
6655 +               if (crp->crp_flags & (CRYPTO_F_IOV|CRYPTO_F_SKBUF)) {
6656 +                       if (!nicealign) {
6657 +                               safestats.st_iovmisaligned++;
6658 +                               err = EINVAL;
6659 +                               goto errout;
6660 +                       }
6661 +                       if (uniform != 1) {
6662 +                               device_printf(sc->sc_dev, "!uniform source\n");
6663 +                               if (!uniform) {
6664 +                                       /*
6665 +                                        * There's no way to handle the DMA
6666 +                                        * requirements with this uio.  We
6667 +                                        * could create a separate DMA area for
6668 +                                        * the result and then copy it back,
6669 +                                        * but for now we just bail and return
6670 +                                        * an error.  Note that uio requests
6671 +                                        * > SAFE_MAX_DSIZE are handled because
6672 +                                        * the DMA map and segment list for the
6673 +                                        * destination will result in a
6674 +                                        * destination particle list that does
6675 +                                        * the necessary scatter DMA.
6676 +                                        */ 
6677 +                                       safestats.st_iovnotuniform++;
6678 +                                       err = EINVAL;
6679 +                                       goto errout;
6680 +                               }
6681 +                       } else
6682 +                               re->re_dst = re->re_src;
6683 +               } else {
6684 +                       safestats.st_badflags++;
6685 +                       err = EINVAL;
6686 +                       goto errout;
6687 +               }
6688 +
6689 +               if (re->re_dst.nsegs > 1) {
6690 +                       re->re_desc.d_dst = sc->sc_dpalloc.dma_paddr +
6691 +                           ((caddr_t) sc->sc_dpfree - (caddr_t) sc->sc_dpring);
6692 +                       for (i = 0; i < re->re_dst_nsegs; i++) {
6693 +                               pd = sc->sc_dpfree;
6694 +                               KASSERT((pd->pd_flags&3) == 0 ||
6695 +                                       (pd->pd_flags&3) == SAFE_PD_DONE,
6696 +                                       ("bogus dest particle descriptor; flags %x",
6697 +                                               pd->pd_flags));
6698 +                               if (++(sc->sc_dpfree) == sc->sc_dpringtop)
6699 +                                       sc->sc_dpfree = sc->sc_dpring;
6700 +                               pd->pd_addr = re->re_dst_segs[i].ds_addr;
6701 +                               pd->pd_flags = SAFE_PD_READY;
6702 +                       }
6703 +                       cmd0 |= SAFE_SA_CMD0_OSCATTER;
6704 +               } else {
6705 +                       /*
6706 +                        * No need for scatter, reference the operand directly.
6707 +                        */
6708 +                       re->re_desc.d_dst = re->re_dst_segs[0].ds_addr;
6709 +               }
6710 +       }
6711 +
6712 +       /*
6713 +        * All done with setup; fill in the SA command words
6714 +        * and the packet engine descriptor.  The operation
6715 +        * is now ready for submission to the hardware.
6716 +        */
6717 +       sa->sa_cmd0 = cmd0 | SAFE_SA_CMD0_IPCI | SAFE_SA_CMD0_OPCI;
6718 +       sa->sa_cmd1 = cmd1
6719 +                   | (coffset << SAFE_SA_CMD1_OFFSET_S)
6720 +                   | SAFE_SA_CMD1_SAREV1       /* Rev 1 SA data structure */
6721 +                   | SAFE_SA_CMD1_SRPCI
6722 +                   ;
6723 +       /*
6724 +        * NB: the order of writes is important here.  In case the
6725 +        * chip is scanning the ring because of an outstanding request
6726 +        * it might nab this one too.  In that case we need to make
6727 +        * sure the setup is complete before we write the length
6728 +        * field of the descriptor as it signals the descriptor is
6729 +        * ready for processing.
6730 +        */
6731 +       re->re_desc.d_csr = SAFE_PE_CSR_READY | SAFE_PE_CSR_SAPCI;
6732 +       if (maccrd)
6733 +               re->re_desc.d_csr |= SAFE_PE_CSR_LOADSA | SAFE_PE_CSR_HASHFINAL;
6734 +       wmb();
6735 +       re->re_desc.d_len = oplen
6736 +                         | SAFE_PE_LEN_READY
6737 +                         | (bypass << SAFE_PE_LEN_BYPASS_S)
6738 +                         ;
6739 +
6740 +       safestats.st_ipackets++;
6741 +       safestats.st_ibytes += oplen;
6742 +
6743 +       if (++(sc->sc_front) == sc->sc_ringtop)
6744 +               sc->sc_front = sc->sc_ring;
6745 +
6746 +       /* XXX honor batching */
6747 +       safe_feed(sc, re);
6748 +       spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
6749 +       return (0);
6750 +
6751 +errout:
6752 +       if (re->re_src.map != re->re_dst.map)
6753 +               pci_unmap_operand(sc, &re->re_dst);
6754 +       if (re->re_src.map)
6755 +               pci_unmap_operand(sc, &re->re_src);
6756 +       spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
6757 +       if (err != ERESTART) {
6758 +               crp->crp_etype = err;
6759 +               crypto_done(crp);
6760 +       } else {
6761 +               sc->sc_needwakeup |= CRYPTO_SYMQ;
6762 +       }
6763 +       return (err);
6764 +}
6765 +
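+/*
+ * Completion handling for a finished ring entry: unmap the DMA operands,
+ * copy back the saved IV for CBC ciphers and/or the ICV for HMAC requests
+ * as needed, and hand the request back to OCF via crypto_done().
+ */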
6766 +static void
6767 +safe_callback(struct safe_softc *sc, struct safe_ringentry *re)
6768 +{
6769 +       struct cryptop *crp = (struct cryptop *)re->re_crp;
6770 +       struct cryptodesc *crd;
6771 +
6772 +       DPRINTF(("%s()\n", __FUNCTION__));
6773 +
6774 +       safestats.st_opackets++;
6775 +       safestats.st_obytes += re->re_dst.mapsize;
6776 +
6777 +       if (re->re_desc.d_csr & SAFE_PE_CSR_STATUS) {
6778 +               device_printf(sc->sc_dev, "csr 0x%x cmd0 0x%x cmd1 0x%x\n",
6779 +                       re->re_desc.d_csr,
6780 +                       re->re_sa.sa_cmd0, re->re_sa.sa_cmd1);
6781 +               safestats.st_peoperr++;
6782 +               crp->crp_etype = EIO;           /* something more meaningful? */
6783 +       }
6784 +
6785 +       if (re->re_dst.map != NULL && re->re_dst.map != re->re_src.map)
6786 +               pci_unmap_operand(sc, &re->re_dst);
6787 +       pci_unmap_operand(sc, &re->re_src);
6788 +
6789 +       /* 
6790 +        * If the result was written to a different skb, swap
6791 +        * it in as the return value and reclaim the original.
6792 +        */
6793 +       if ((crp->crp_flags & CRYPTO_F_SKBUF) && re->re_src_skb != re->re_dst_skb) {
6794 +               device_printf(sc->sc_dev, "no CRYPTO_F_SKBUF swapping support\n");
6795 +               /* kfree_skb(skb) */
6796 +               /* crp->crp_buf = (caddr_t)re->re_dst_skb */
6797 +               return;
6798 +       }
6799 +
6800 +       if (re->re_flags & SAFE_QFLAGS_COPYOUTIV) {
6801 +               /* copy out IV for future use */
6802 +               for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
6803 +                       int i;
6804 +                       int ivsize;
6805 +
6806 +                       if (crd->crd_alg == CRYPTO_DES_CBC ||
6807 +                           crd->crd_alg == CRYPTO_3DES_CBC) {
6808 +                               ivsize = 2*sizeof(u_int32_t);
6809 +                       } else if (crd->crd_alg == CRYPTO_AES_CBC) {
6810 +                               ivsize = 4*sizeof(u_int32_t);
6811 +                       } else
6812 +                               continue;
6813 +                       crypto_copydata(crp->crp_flags, crp->crp_buf,
6814 +                           crd->crd_skip + crd->crd_len - ivsize, ivsize,
6815 +                           (caddr_t)sc->sc_sessions[re->re_sesn].ses_iv);
6816 +                       for (i = 0;
6817 +                                       i < ivsize/sizeof(sc->sc_sessions[re->re_sesn].ses_iv[0]);
6818 +                                       i++)
6819 +                               sc->sc_sessions[re->re_sesn].ses_iv[i] =
6820 +                                       cpu_to_le32(sc->sc_sessions[re->re_sesn].ses_iv[i]);
6821 +                       break;
6822 +               }
6823 +       }
6824 +
6825 +       if (re->re_flags & SAFE_QFLAGS_COPYOUTICV) {
6826 +               /* copy out ICV result */
6827 +               for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
6828 +                       if (!(crd->crd_alg == CRYPTO_MD5_HMAC ||
6829 +                           crd->crd_alg == CRYPTO_SHA1_HMAC ||
6830 +                           crd->crd_alg == CRYPTO_NULL_HMAC))
6831 +                               continue;
6832 +                       if (crd->crd_alg == CRYPTO_SHA1_HMAC) {
6833 +                               /*
6834 +                                * SHA-1 ICVs are byte-swapped; fix them up
6835 +                                * before copying them to their destination.
6836 +                                */
6837 +                               re->re_sastate.sa_saved_indigest[0] =
6838 +                                       cpu_to_be32(re->re_sastate.sa_saved_indigest[0]);
6839 +                               re->re_sastate.sa_saved_indigest[1] = 
6840 +                                       cpu_to_be32(re->re_sastate.sa_saved_indigest[1]);
6841 +                               re->re_sastate.sa_saved_indigest[2] =
6842 +                                       cpu_to_be32(re->re_sastate.sa_saved_indigest[2]);
6843 +                       } else {
6844 +                               re->re_sastate.sa_saved_indigest[0] =
6845 +                                       cpu_to_le32(re->re_sastate.sa_saved_indigest[0]);
6846 +                               re->re_sastate.sa_saved_indigest[1] = 
6847 +                                       cpu_to_le32(re->re_sastate.sa_saved_indigest[1]);
6848 +                               re->re_sastate.sa_saved_indigest[2] =
6849 +                                       cpu_to_le32(re->re_sastate.sa_saved_indigest[2]);
6850 +                       }
6851 +                       crypto_copyback(crp->crp_flags, crp->crp_buf,
6852 +                           crd->crd_inject,
6853 +                           sc->sc_sessions[re->re_sesn].ses_mlen,
6854 +                           (caddr_t)re->re_sastate.sa_saved_indigest);
6855 +                       break;
6856 +               }
6857 +       }
6858 +       crypto_done(crp);
6859 +}
6860 +
6861 +
6862 +#if defined(CONFIG_OCF_RANDOMHARVEST) && !defined(SAFE_NO_RNG)
6863 +#define        SAFE_RNG_MAXWAIT        1000
6864 +
6865 +static void
6866 +safe_rng_init(struct safe_softc *sc)
6867 +{
6868 +       u_int32_t w, v;
6869 +       int i;
6870 +
6871 +       DPRINTF(("%s()\n", __FUNCTION__));
6872 +
6873 +       WRITE_REG(sc, SAFE_RNG_CTRL, 0);
6874 +       /* use default value according to the manual */
6875 +       WRITE_REG(sc, SAFE_RNG_CNFG, 0x834);    /* magic from SafeNet */
6876 +       WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);
6877 +
6878 +       /*
6879 +        * There is a bug in rev 1.0 of the 1140 that when the RNG
6880 +        * is brought out of reset the ready status flag does not
6881 +        * work until the RNG has finished its internal initialization.
6882 +        *
6883 +        * So to determine that the device has finished its internal
6884 +        * initialization we read the data register (checking the status
6885 +        * register first in case it is already initialized), then re-read
6886 +        * it until the value changes from the first read.  Once it has
6887 +        * changed, re-read it until it changes again.  At that point the
6888 +        * RNG is considered initialized.  This can take between 750ms
6889 +        * and 1000ms.
6890 +        */
6891 +       i = 0;
6892 +       w = READ_REG(sc, SAFE_RNG_OUT);
6893 +       do {
6894 +               v = READ_REG(sc, SAFE_RNG_OUT);
6895 +               if (v != w) {
6896 +                       w = v;
6897 +                       break;
6898 +               }
6899 +               DELAY(10);
6900 +       } while (++i < SAFE_RNG_MAXWAIT);
6901 +
6902 +       /* Wait until the data changes again */
6903 +       i = 0;
6904 +       do {
6905 +               v = READ_REG(sc, SAFE_RNG_OUT);
6906 +               if (v != w)
6907 +                       break;
6908 +               DELAY(10);
6909 +       } while (++i < SAFE_RNG_MAXWAIT);
6910 +}
6911 +
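+/*
+ * NB: the SHORTEN bit presumably shortens the RNG oscillator sample
+ * cycle; safe_read_random() toggles it while re-tuning the oscillator
+ * after the alarm counter exceeds its threshold.
+ */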
6912 +static __inline void
6913 +safe_rng_disable_short_cycle(struct safe_softc *sc)
6914 +{
6915 +       DPRINTF(("%s()\n", __FUNCTION__));
6916 +
6917 +       WRITE_REG(sc, SAFE_RNG_CTRL,
6918 +               READ_REG(sc, SAFE_RNG_CTRL) &~ SAFE_RNG_CTRL_SHORTEN);
6919 +}
6920 +
6921 +static __inline void
6922 +safe_rng_enable_short_cycle(struct safe_softc *sc)
6923 +{
6924 +       DPRINTF(("%s()\n", __FUNCTION__));
6925 +
6926 +       WRITE_REG(sc, SAFE_RNG_CTRL, 
6927 +               READ_REG(sc, SAFE_RNG_CTRL) | SAFE_RNG_CTRL_SHORTEN);
6928 +}
6929 +
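+/*
+ * Spin until the RNG status register reports data available (or
+ * SAFE_RNG_MAXWAIT polls elapse) and return one word from the RNG
+ * output register.
+ */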
6930 +static __inline u_int32_t
6931 +safe_rng_read(struct safe_softc *sc)
6932 +{
6933 +       int i;
6934 +
6935 +       i = 0;
6936 +       while (READ_REG(sc, SAFE_RNG_STAT) != 0 && ++i < SAFE_RNG_MAXWAIT)
6937 +               ;
6938 +       return READ_REG(sc, SAFE_RNG_OUT);
6939 +}
6940 +
6941 +static int
6942 +safe_read_random(void *arg, u_int32_t *buf, int maxwords)
6943 +{
6944 +       struct safe_softc *sc = (struct safe_softc *) arg;
6945 +       int i, rc;
6946 +
6947 +       DPRINTF(("%s()\n", __FUNCTION__));
6948 +       
6949 +       safestats.st_rng++;
6950 +       /*
6951 +        * Fetch the next block of data.
6952 +        */
6953 +       if (maxwords > safe_rngbufsize)
6954 +               maxwords = safe_rngbufsize;
6955 +       if (maxwords > SAFE_RNG_MAXBUFSIZ)
6956 +               maxwords = SAFE_RNG_MAXBUFSIZ;
6957 +retry:
6958 +       /* read as much as we can */
6959 +       for (rc = 0; rc < maxwords; rc++) {
6960 +               if (READ_REG(sc, SAFE_RNG_STAT) != 0)
6961 +                       break;
6962 +               buf[rc] = READ_REG(sc, SAFE_RNG_OUT);
6963 +       }
6964 +       if (rc == 0)
6965 +               return 0;
6966 +       /*
6967 +        * Check the comparator alarm count and reset the h/w if
6968 +        * it exceeds our threshold.  This guards against the
6969 +        * hardware oscillators resonating with external signals.
6970 +        */
6971 +       if (READ_REG(sc, SAFE_RNG_ALM_CNT) > safe_rngmaxalarm) {
6972 +               u_int32_t freq_inc, w;
6973 +
6974 +               DPRINTF(("%s: alarm count %u exceeds threshold %u\n", __func__,
6975 +                       (unsigned)READ_REG(sc, SAFE_RNG_ALM_CNT), safe_rngmaxalarm));
6976 +               safestats.st_rngalarm++;
6977 +               safe_rng_enable_short_cycle(sc);
6978 +               freq_inc = 18;
6979 +               for (i = 0; i < 64; i++) {
6980 +                       w = READ_REG(sc, SAFE_RNG_CNFG);
6981 +                       freq_inc = ((w + freq_inc) & 0x3fL);
6982 +                       w = ((w & ~0x3fL) | freq_inc);
6983 +                       WRITE_REG(sc, SAFE_RNG_CNFG, w);
6984 +
6985 +                       WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);
6986 +
6987 +                       (void) safe_rng_read(sc);
6988 +                       DELAY(25);
6989 +
6990 +                       if (READ_REG(sc, SAFE_RNG_ALM_CNT) == 0) {
6991 +                               safe_rng_disable_short_cycle(sc);
6992 +                               goto retry;
6993 +                       }
6994 +                       freq_inc = 1;
6995 +               }
6996 +               safe_rng_disable_short_cycle(sc);
6997 +       } else
6998 +               WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);
6999 +
7000 +       return(rc);
7001 +}
7002 +#endif /* defined(CONFIG_OCF_RANDOMHARVEST) && !defined(SAFE_NO_RNG) */
7003 +
7004 +
7005 +/*
7006 + * Reset the board.  Values in the registers are left as they are
7007 + * from the reset (i.e. initial values are assigned elsewhere).
7008 + */
7009 +static void
7010 +safe_reset_board(struct safe_softc *sc)
7011 +{
7012 +       u_int32_t v;
7013 +       /*
7014 +        * Reset the device.  The manual says no delay
7015 +        * is needed between marking and clearing reset.
7016 +        */
7017 +       DPRINTF(("%s()\n", __FUNCTION__));
7018 +
7019 +       v = READ_REG(sc, SAFE_PE_DMACFG) &~
7020 +               (SAFE_PE_DMACFG_PERESET | SAFE_PE_DMACFG_PDRRESET |
7021 +                SAFE_PE_DMACFG_SGRESET);
7022 +       WRITE_REG(sc, SAFE_PE_DMACFG, v
7023 +                                   | SAFE_PE_DMACFG_PERESET
7024 +                                   | SAFE_PE_DMACFG_PDRRESET
7025 +                                   | SAFE_PE_DMACFG_SGRESET);
7026 +       WRITE_REG(sc, SAFE_PE_DMACFG, v);
7027 +}
7028 +
7029 +/*
7030 + * Initialize registers we need to touch only once.
7031 + */
7032 +static void
7033 +safe_init_board(struct safe_softc *sc)
7034 +{
7035 +       u_int32_t v, dwords;
7036 +
7037 +       DPRINTF(("%s()\n", __FUNCTION__));
7038 +
7039 +       v = READ_REG(sc, SAFE_PE_DMACFG);
7040 +       v &=~ (   SAFE_PE_DMACFG_PEMODE
7041 +                       | SAFE_PE_DMACFG_FSENA          /* failsafe enable */
7042 +                       | SAFE_PE_DMACFG_GPRPCI         /* gather ring on PCI */
7043 +                       | SAFE_PE_DMACFG_SPRPCI         /* scatter ring on PCI */
7044 +                       | SAFE_PE_DMACFG_ESDESC         /* endian-swap descriptors */
7045 +                       | SAFE_PE_DMACFG_ESPDESC        /* endian-swap part. desc's */
7046 +                       | SAFE_PE_DMACFG_ESSA           /* endian-swap SA's */
7047 +                       | SAFE_PE_DMACFG_ESPACKET       /* swap the packet data */
7048 +                 );
7049 +       v |= SAFE_PE_DMACFG_FSENA               /* failsafe enable */
7050 +         |  SAFE_PE_DMACFG_GPRPCI              /* gather ring on PCI */
7051 +         |  SAFE_PE_DMACFG_SPRPCI              /* scatter ring on PCI */
7052 +         |  SAFE_PE_DMACFG_ESDESC              /* endian-swap descriptors */
7053 +         |  SAFE_PE_DMACFG_ESPDESC             /* endian-swap part. desc's */
7054 +         |  SAFE_PE_DMACFG_ESSA                /* endian-swap SA's */
7055 +#if 0
7056 +         |  SAFE_PE_DMACFG_ESPACKET    /* swap the packet data */
7057 +#endif
7058 +         ;
7059 +       WRITE_REG(sc, SAFE_PE_DMACFG, v);
7060 +
7061 +#ifdef __BIG_ENDIAN
7062 +       /* tell the safenet that we are 4321 and not 1234 */
7063 +       WRITE_REG(sc, SAFE_ENDIAN, 0xe4e41b1b);
7064 +#endif
7065 +
7066 +       if (sc->sc_chiprev == SAFE_REV(1,0)) {
7067 +               /*
7068 +                * Avoid large PCI DMA transfers.  Rev 1.0 has a bug where
7069 +                * "target mode transfers" done while the chip is DMA'ing
7070 +                * >1020 bytes cause the hardware to lock up.  To avoid this
7071 +                * we reduce the max PCI transfer size and use small source
7072 +                * particle descriptors (<= 256 bytes).
7073 +                */
7074 +               WRITE_REG(sc, SAFE_DMA_CFG, 256);
7075 +               device_printf(sc->sc_dev,
7076 +                       "Reduce max DMA size to %u words for rev %u.%u WAR\n",
7077 +                       (unsigned) ((READ_REG(sc, SAFE_DMA_CFG)>>2) & 0xff),
7078 +                       (unsigned) SAFE_REV_MAJ(sc->sc_chiprev),
7079 +                       (unsigned) SAFE_REV_MIN(sc->sc_chiprev));
7080 +               sc->sc_max_dsize = 256;
7081 +       } else {
7082 +               sc->sc_max_dsize = SAFE_MAX_DSIZE;
7083 +       }
7084 +
7085 +       /* NB: operands+results are overlaid */
7086 +       WRITE_REG(sc, SAFE_PE_PDRBASE, sc->sc_ringalloc.dma_paddr);
7087 +       WRITE_REG(sc, SAFE_PE_RDRBASE, sc->sc_ringalloc.dma_paddr);
7088 +       /*
7089 +        * Configure ring entry size and number of items in the ring.
7090 +        */
7091 +       KASSERT((sizeof(struct safe_ringentry) % sizeof(u_int32_t)) == 0,
7092 +               ("PE ring entry not 32-bit aligned!"));
7093 +       dwords = sizeof(struct safe_ringentry) / sizeof(u_int32_t);
7094 +       WRITE_REG(sc, SAFE_PE_RINGCFG,
7095 +               (dwords << SAFE_PE_RINGCFG_OFFSET_S) | SAFE_MAX_NQUEUE);
7096 +       WRITE_REG(sc, SAFE_PE_RINGPOLL, 0);     /* disable polling */
7097 +
7098 +       WRITE_REG(sc, SAFE_PE_GRNGBASE, sc->sc_spalloc.dma_paddr);
7099 +       WRITE_REG(sc, SAFE_PE_SRNGBASE, sc->sc_dpalloc.dma_paddr);
7100 +       WRITE_REG(sc, SAFE_PE_PARTSIZE,
7101 +               (SAFE_TOTAL_DPART<<16) | SAFE_TOTAL_SPART);
7102 +       /*
7103 +        * NB: destination particles are fixed size.  We use
7104 +        *     an mbuf cluster and require all results go to
7105 +        *     clusters or smaller.
7106 +        */
7107 +       WRITE_REG(sc, SAFE_PE_PARTCFG, sc->sc_max_dsize);
7108 +
7109 +       /* it's now safe to enable PE mode, do it */
7110 +       WRITE_REG(sc, SAFE_PE_DMACFG, v | SAFE_PE_DMACFG_PEMODE);
7111 +
7112 +       /*
7113 +        * Configure hardware to use level-triggered interrupts and
7114 +        * to interrupt after each descriptor is processed.
7115 +        */
7116 +       WRITE_REG(sc, SAFE_HI_CFG, SAFE_HI_CFG_LEVEL);
7117 +       WRITE_REG(sc, SAFE_HI_CLR, 0xffffffff);
7118 +       WRITE_REG(sc, SAFE_HI_DESC_CNT, 1);
7119 +       WRITE_REG(sc, SAFE_HI_MASK, SAFE_INT_PE_DDONE | SAFE_INT_PE_ERROR);
7120 +}
7121 +
7122 +
7123 +/*
7124 + * Clean up after a chip crash.
7125 + * It is assumed that the caller is in splimp().
7126 + */
7127 +static void
7128 +safe_cleanchip(struct safe_softc *sc)
7129 +{
7130 +       DPRINTF(("%s()\n", __FUNCTION__));
7131 +
7132 +       if (sc->sc_nqchip != 0) {
7133 +               struct safe_ringentry *re = sc->sc_back;
7134 +
7135 +               while (re != sc->sc_front) {
7136 +                       if (re->re_desc.d_csr != 0)
7137 +                               safe_free_entry(sc, re);
7138 +                       if (++re == sc->sc_ringtop)
7139 +                               re = sc->sc_ring;
7140 +               }
7141 +               sc->sc_back = re;
7142 +               sc->sc_nqchip = 0;
7143 +       }
7144 +}
7145 +
7146 +/*
7147 + * free a safe_q
7148 + * It is assumed that the caller is within splimp().
7149 + */
7150 +static int
7151 +safe_free_entry(struct safe_softc *sc, struct safe_ringentry *re)
7152 +{
7153 +       struct cryptop *crp;
7154 +
7155 +       DPRINTF(("%s()\n", __FUNCTION__));
7156 +
7157 +       /*
7158 +        * Free header MCR
7159 +        */
7160 +       if ((re->re_dst_skb != NULL) && (re->re_src_skb != re->re_dst_skb))
7161 +#ifdef NOTYET
7162 +               m_freem(re->re_dst_m);
7163 +#else
7164 +               printk("%s,%d: SKB not supported\n", __FILE__, __LINE__);
7165 +#endif
7166 +
7167 +       crp = (struct cryptop *)re->re_crp;
7168 +       
7169 +       re->re_desc.d_csr = 0;
7170 +       
7171 +       crp->crp_etype = EFAULT;
7172 +       crypto_done(crp);
7173 +       return(0);
7174 +}
7175 +
7176 +/*
7177 + * Routine to reset the chip and clean up.
7178 + * It is assumed that the caller is in splimp()
7179 + */
7180 +static void
7181 +safe_totalreset(struct safe_softc *sc)
7182 +{
7183 +       DPRINTF(("%s()\n", __FUNCTION__));
7184 +
7185 +       safe_reset_board(sc);
7186 +       safe_init_board(sc);
7187 +       safe_cleanchip(sc);
7188 +}
7189 +
7190 +/*
7191 + * Is the operand suitably aligned for direct DMA?  Each
7192 + * segment must be aligned on a 32-bit boundary and all
7193 + * but the last segment must be a multiple of 4 bytes.
7194 + */
7195 +static int
7196 +safe_dmamap_aligned(struct safe_softc *sc, const struct safe_operand *op)
7197 +{
7198 +       int i;
7199 +
7200 +       DPRINTF(("%s()\n", __FUNCTION__));
7201 +
7202 +       for (i = 0; i < op->nsegs; i++) {
7203 +               if (op->segs[i].ds_addr & 3)
7204 +                       return (0);
7205 +               if (i != (op->nsegs - 1) && (op->segs[i].ds_len & 3))
7206 +                       return (0);
7207 +       }
7208 +       return (1);
7209 +}
7210 +
7211 +/*
7212 + * Is the operand suitable for direct DMA as the destination
7213 + * of an operation?  The hardware requires that each ``particle''
7214 + * but the last in an operation result have the same size.  We
7215 + * fix that size at sc_max_dsize (normally SAFE_MAX_DSIZE) bytes.
7216 + * This routine returns 0 if some segment is not a multiple of
7217 + * this size, 1 if all segments are exactly this size, or 2 if
7218 + * segments are at worst a multiple of this size.
7219 + */
7220 +static int
7221 +safe_dmamap_uniform(struct safe_softc *sc, const struct safe_operand *op)
7222 +{
7223 +       int result = 1;
7224 +
7225 +       DPRINTF(("%s()\n", __FUNCTION__));
7226 +
7227 +       if (op->nsegs > 0) {
7228 +               int i;
7229 +
7230 +               for (i = 0; i < op->nsegs-1; i++) {
7231 +                       if (op->segs[i].ds_len % sc->sc_max_dsize)
7232 +                               return (0);
7233 +                       if (op->segs[i].ds_len != sc->sc_max_dsize)
7234 +                               result = 2;
7235 +               }
7236 +       }
7237 +       return (result);
7238 +}
7239 +
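+/*
+ * OCF entry point for asymmetric (key) operations.  Only CRK_MOD_EXP is
+ * supported; the request is queued on sc_pkq under sc_pkmtx and the
+ * feeder is kicked to start it when the PK engine is idle.
+ */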
7240 +static int
7241 +safe_kprocess(device_t dev, struct cryptkop *krp, int hint)
7242 +{
7243 +       struct safe_softc *sc = device_get_softc(dev);
7244 +       struct safe_pkq *q;
7245 +       unsigned long flags;
7246 +
7247 +       DPRINTF(("%s()\n", __FUNCTION__));
7248 +
7249 +       if (sc == NULL) {
7250 +               krp->krp_status = EINVAL;
7251 +               goto err;
7252 +       }
7253 +
7254 +       if (krp->krp_op != CRK_MOD_EXP) {
7255 +               krp->krp_status = EOPNOTSUPP;
7256 +               goto err;
7257 +       }
7258 +
7259 +       q = (struct safe_pkq *) kmalloc(sizeof(*q), GFP_KERNEL);
7260 +       if (q == NULL) {
7261 +               krp->krp_status = ENOMEM;
7262 +               goto err;
7263 +       }
7264 +       memset(q, 0, sizeof(*q));
7265 +       q->pkq_krp = krp;
7266 +       INIT_LIST_HEAD(&q->pkq_list);
7267 +
7268 +       spin_lock_irqsave(&sc->sc_pkmtx, flags);
7269 +       list_add_tail(&q->pkq_list, &sc->sc_pkq);
7270 +       safe_kfeed(sc);
7271 +       spin_unlock_irqrestore(&sc->sc_pkmtx, flags);
7272 +       return (0);
7273 +
7274 +err:
7275 +       crypto_kdone(krp);
7276 +       return (0);
7277 +}
7278 +
7279 +#define        SAFE_CRK_PARAM_BASE     0
7280 +#define        SAFE_CRK_PARAM_EXP      1
7281 +#define        SAFE_CRK_PARAM_MOD      2
7282 +
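+/*
+ * Validate the base, exponent and modulus against the PK engine's
+ * operand constraints (the numbered checks below), then load the
+ * A (exponent), B (modulus) and C (base) regions of PK RAM and start
+ * the modular exponentiation; safe_kpoll() later collects the result
+ * from the D region.  Returns 0 on success or 1 with krp_status set.
+ */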
7283 +static int
7284 +safe_kstart(struct safe_softc *sc)
7285 +{
7286 +       struct cryptkop *krp = sc->sc_pkq_cur->pkq_krp;
7287 +       int exp_bits, mod_bits, base_bits;
7288 +       u_int32_t op, a_off, b_off, c_off, d_off;
7289 +
7290 +       DPRINTF(("%s()\n", __FUNCTION__));
7291 +
7292 +       if (krp->krp_iparams < 3 || krp->krp_oparams != 1) {
7293 +               krp->krp_status = EINVAL;
7294 +               return (1);
7295 +       }
7296 +
7297 +       base_bits = safe_ksigbits(sc, &krp->krp_param[SAFE_CRK_PARAM_BASE]);
7298 +       if (base_bits > 2048)
7299 +               goto too_big;
7300 +       if (base_bits <= 0)             /* 5. base not zero */
7301 +               goto too_small;
7302 +
7303 +       exp_bits = safe_ksigbits(sc, &krp->krp_param[SAFE_CRK_PARAM_EXP]);
7304 +       if (exp_bits > 2048)
7305 +               goto too_big;
7306 +       if (exp_bits <= 0)              /* 1. exponent word length > 0 */
7307 +               goto too_small;         /* 4. exponent not zero */
7308 +
7309 +       mod_bits = safe_ksigbits(sc, &krp->krp_param[SAFE_CRK_PARAM_MOD]);
7310 +       if (mod_bits > 2048)
7311 +               goto too_big;
7312 +       if (mod_bits <= 32)             /* 2. modulus word length > 1 */
7313 +               goto too_small;         /* 8. MSW of modulus != zero */
7314 +       if (mod_bits < exp_bits)        /* 3. modulus len >= exponent len */
7315 +               goto too_small;
7316 +       if ((krp->krp_param[SAFE_CRK_PARAM_MOD].crp_p[0] & 1) == 0)
7317 +               goto bad_domain;        /* 6. modulus is odd */
7318 +       if (mod_bits > krp->krp_param[krp->krp_iparams].crp_nbits)
7319 +               goto too_small;         /* make sure result will fit */
7320 +
7321 +       /* 7. modulus > base */
7322 +       if (mod_bits < base_bits)
7323 +               goto too_small;
7324 +       if (mod_bits == base_bits) {
7325 +               u_int8_t *basep, *modp;
7326 +               int i;
7327 +
7328 +               basep = krp->krp_param[SAFE_CRK_PARAM_BASE].crp_p +
7329 +                   ((base_bits + 7) / 8) - 1;
7330 +               modp = krp->krp_param[SAFE_CRK_PARAM_MOD].crp_p +
7331 +                   ((mod_bits + 7) / 8) - 1;
7332 +               
7333 +               for (i = 0; i < (mod_bits + 7) / 8; i++, basep--, modp--) {
7334 +                       if (*modp < *basep)
7335 +                               goto too_small;
7336 +                       if (*modp > *basep)
7337 +                               break;
7338 +               }
7339 +       }
7340 +
7341 +       /* And on the 9th step, he rested. */
7342 +
7343 +       WRITE_REG(sc, SAFE_PK_A_LEN, (exp_bits + 31) / 32);
7344 +       WRITE_REG(sc, SAFE_PK_B_LEN, (mod_bits + 31) / 32);
7345 +       if (mod_bits > 1024) {
7346 +               op = SAFE_PK_FUNC_EXP4;
7347 +               a_off = 0x000;
7348 +               b_off = 0x100;
7349 +               c_off = 0x200;
7350 +               d_off = 0x300;
7351 +       } else {
7352 +               op = SAFE_PK_FUNC_EXP16;
7353 +               a_off = 0x000;
7354 +               b_off = 0x080;
7355 +               c_off = 0x100;
7356 +               d_off = 0x180;
7357 +       }
7358 +       sc->sc_pk_reslen = b_off - a_off;
7359 +       sc->sc_pk_resoff = d_off;
7360 +
7361 +       /* A is exponent, B is modulus, C is base, D is result */
7362 +       safe_kload_reg(sc, a_off, b_off - a_off,
7363 +           &krp->krp_param[SAFE_CRK_PARAM_EXP]);
7364 +       WRITE_REG(sc, SAFE_PK_A_ADDR, a_off >> 2);
7365 +       safe_kload_reg(sc, b_off, b_off - a_off,
7366 +           &krp->krp_param[SAFE_CRK_PARAM_MOD]);
7367 +       WRITE_REG(sc, SAFE_PK_B_ADDR, b_off >> 2);
7368 +       safe_kload_reg(sc, c_off, b_off - a_off,
7369 +           &krp->krp_param[SAFE_CRK_PARAM_BASE]);
7370 +       WRITE_REG(sc, SAFE_PK_C_ADDR, c_off >> 2);
7371 +       WRITE_REG(sc, SAFE_PK_D_ADDR, d_off >> 2);
7372 +
7373 +       WRITE_REG(sc, SAFE_PK_FUNC, op | SAFE_PK_FUNC_RUN);
7374 +
7375 +       return (0);
7376 +
7377 +too_big:
7378 +       krp->krp_status = E2BIG;
7379 +       return (1);
7380 +too_small:
7381 +       krp->krp_status = ERANGE;
7382 +       return (1);
7383 +bad_domain:
7384 +       krp->krp_status = EDOM;
7385 +       return (1);
7386 +}
7387 +
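+/*
+ * Return the number of significant bits in a crparam.  Parameters
+ * appear to be stored least-significant byte first (see also
+ * safe_kload_reg()), so scan from the highest byte down, discounting
+ * leading zero bytes and the leading zero bits of the first non-zero
+ * byte; e.g. a 3-byte parameter holding the value 1 yields 1.
+ */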
7388 +static int
7389 +safe_ksigbits(struct safe_softc *sc, struct crparam *cr)
7390 +{
7391 +       u_int plen = (cr->crp_nbits + 7) / 8;
7392 +       int i, sig = plen * 8;
7393 +       u_int8_t c, *p = cr->crp_p;
7394 +
7395 +       DPRINTF(("%s()\n", __FUNCTION__));
7396 +
7397 +       for (i = plen - 1; i >= 0; i--) {
7398 +               c = p[i];
7399 +               if (c != 0) {
7400 +                       while ((c & 0x80) == 0) {
7401 +                               sig--;
7402 +                               c <<= 1;
7403 +                       }
7404 +                       break;
7405 +               }
7406 +               sig -= 8;
7407 +       }
7408 +       return (sig);
7409 +}
7410 +
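+/*
+ * If no public-key operation is in flight, pull the next request off
+ * sc_pkq, start it with safe_kstart() and arm the polling timer;
+ * requests that fail validation are completed immediately with
+ * crypto_kdone().  Called with sc_pkmtx held.
+ */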
7411 +static void
7412 +safe_kfeed(struct safe_softc *sc)
7413 +{
7414 +       struct safe_pkq *q, *tmp;
7415 +
7416 +       DPRINTF(("%s()\n", __FUNCTION__));
7417 +
7418 +       if (list_empty(&sc->sc_pkq) && sc->sc_pkq_cur == NULL)
7419 +               return;
7420 +       if (sc->sc_pkq_cur != NULL)
7421 +               return;
7422 +       list_for_each_entry_safe(q, tmp, &sc->sc_pkq, pkq_list) {
7423 +               sc->sc_pkq_cur = q;
7424 +               list_del(&q->pkq_list);
7425 +               if (safe_kstart(sc) != 0) {
7426 +                       crypto_kdone(q->pkq_krp);
7427 +                       kfree(q);
7428 +                       sc->sc_pkq_cur = NULL;
7429 +               } else {
7430 +                       /* op started, start polling */
7431 +                       mod_timer(&sc->sc_pkto, jiffies + 1);
7432 +                       break;
7433 +               }
7434 +       }
7435 +}
7436 +
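+/*
+ * Timer callback that polls the PK engine.  While the RUN bit is set it
+ * simply re-arms itself; once the operation completes it copies the
+ * result out of PK RAM (little-endian words) into the output crparam,
+ * trims crp_nbits to the significant bits, scrubs the PK RAM, completes
+ * the request and feeds the next one.
+ */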
7437 +static void
7438 +safe_kpoll(unsigned long arg)
7439 +{
7440 +       struct safe_softc *sc = NULL;
7441 +       struct safe_pkq *q;
7442 +       struct crparam *res;
7443 +       int i;
7444 +       u_int32_t buf[64];
7445 +       unsigned long flags;
7446 +
7447 +       DPRINTF(("%s()\n", __FUNCTION__));
7448 +
7449 +       if (arg >= SAFE_MAX_CHIPS)
7450 +               return;
7451 +       sc = safe_chip_idx[arg];
7452 +       if (!sc) {
7453 +               DPRINTF(("%s() - bad callback\n", __FUNCTION__));
7454 +               return;
7455 +       }
7456 +
7457 +       spin_lock_irqsave(&sc->sc_pkmtx, flags);
7458 +       if (sc->sc_pkq_cur == NULL)
7459 +               goto out;
7460 +       if (READ_REG(sc, SAFE_PK_FUNC) & SAFE_PK_FUNC_RUN) {
7461 +               /* still running, check back later */
7462 +               mod_timer(&sc->sc_pkto, jiffies + 1);
7463 +               goto out;
7464 +       }
7465 +
7466 +       q = sc->sc_pkq_cur;
7467 +       res = &q->pkq_krp->krp_param[q->pkq_krp->krp_iparams];
7468 +       bzero(buf, sizeof(buf));
7469 +       bzero(res->crp_p, (res->crp_nbits + 7) / 8);
7470 +       for (i = 0; i < sc->sc_pk_reslen >> 2; i++)
7471 +               buf[i] = le32_to_cpu(READ_REG(sc, SAFE_PK_RAM_START +
7472 +                   sc->sc_pk_resoff + (i << 2)));
7473 +       bcopy(buf, res->crp_p, (res->crp_nbits + 7) / 8);
7474 +       /*
7475 +        * reduce the bits that need copying if possible
7476 +        */
7477 +       res->crp_nbits = min(res->crp_nbits,sc->sc_pk_reslen * 8);
7478 +       res->crp_nbits = safe_ksigbits(sc, res);
7479 +
7480 +       for (i = SAFE_PK_RAM_START; i < SAFE_PK_RAM_END; i += 4)
7481 +               WRITE_REG(sc, i, 0);
7482 +
7483 +       crypto_kdone(q->pkq_krp);
7484 +       kfree(q);
7485 +       sc->sc_pkq_cur = NULL;
7486 +
7487 +       safe_kfeed(sc);
7488 +out:
7489 +       spin_unlock_irqrestore(&sc->sc_pkmtx, flags);
7490 +}
7491 +
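+/*
+ * Copy a crparam into PK RAM at byte offset off, zero-padded out to len
+ * bytes and written as little-endian 32-bit words.
+ */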
7492 +static void
7493 +safe_kload_reg(struct safe_softc *sc, u_int32_t off, u_int32_t len,
7494 +    struct crparam *n)
7495 +{
7496 +       u_int32_t buf[64], i;
7497 +
7498 +       DPRINTF(("%s()\n", __FUNCTION__));
7499 +
7500 +       bzero(buf, sizeof(buf));
7501 +       bcopy(n->crp_p, buf, (n->crp_nbits + 7) / 8);
7502 +
7503 +       for (i = 0; i < len >> 2; i++)
7504 +               WRITE_REG(sc, SAFE_PK_RAM_START + off + (i << 2),
7505 +                   cpu_to_le32(buf[i]));
7506 +}
7507 +
7508 +#ifdef SAFE_DEBUG
7509 +static void
7510 +safe_dump_dmastatus(struct safe_softc *sc, const char *tag)
7511 +{
7512 +       printf("%s: ENDIAN 0x%x SRC 0x%x DST 0x%x STAT 0x%x\n"
7513 +               , tag
7514 +               , READ_REG(sc, SAFE_DMA_ENDIAN)
7515 +               , READ_REG(sc, SAFE_DMA_SRCADDR)
7516 +               , READ_REG(sc, SAFE_DMA_DSTADDR)
7517 +               , READ_REG(sc, SAFE_DMA_STAT)
7518 +       );
7519 +}
7520 +
7521 +static void
7522 +safe_dump_intrstate(struct safe_softc *sc, const char *tag)
7523 +{
7524 +       printf("%s: HI_CFG 0x%x HI_MASK 0x%x HI_DESC_CNT 0x%x HU_STAT 0x%x HM_STAT 0x%x\n"
7525 +               , tag
7526 +               , READ_REG(sc, SAFE_HI_CFG)
7527 +               , READ_REG(sc, SAFE_HI_MASK)
7528 +               , READ_REG(sc, SAFE_HI_DESC_CNT)
7529 +               , READ_REG(sc, SAFE_HU_STAT)
7530 +               , READ_REG(sc, SAFE_HM_STAT)
7531 +       );
7532 +}
7533 +
7534 +static void
7535 +safe_dump_ringstate(struct safe_softc *sc, const char *tag)
7536 +{
7537 +       u_int32_t estat = READ_REG(sc, SAFE_PE_ERNGSTAT);
7538 +
7539 +       /* NB: assume caller has lock on ring */
7540 +       printf("%s: ERNGSTAT %x (next %u) back %lu front %lu\n",
7541 +               tag,
7542 +               estat, (estat >> SAFE_PE_ERNGSTAT_NEXT_S),
7543 +               (unsigned long)(sc->sc_back - sc->sc_ring),
7544 +               (unsigned long)(sc->sc_front - sc->sc_ring));
7545 +}
7546 +
7547 +static void
7548 +safe_dump_request(struct safe_softc *sc, const char* tag, struct safe_ringentry *re)
7549 +{
7550 +       int ix, nsegs;
7551 +
7552 +       ix = re - sc->sc_ring;
7553 +       printf("%s: %p (%u): csr %x src %x dst %x sa %x len %x\n"
7554 +               , tag
7555 +               , re, ix
7556 +               , re->re_desc.d_csr
7557 +               , re->re_desc.d_src
7558 +               , re->re_desc.d_dst
7559 +               , re->re_desc.d_sa
7560 +               , re->re_desc.d_len
7561 +       );
7562 +       if (re->re_src.nsegs > 1) {
7563 +               ix = (re->re_desc.d_src - sc->sc_spalloc.dma_paddr) /
7564 +                       sizeof(struct safe_pdesc);
7565 +               for (nsegs = re->re_src.nsegs; nsegs; nsegs--) {
7566 +                       printf(" spd[%u] %p: %p size %u flags %x"
7567 +                               , ix, &sc->sc_spring[ix]
7568 +                               , (caddr_t)(uintptr_t) sc->sc_spring[ix].pd_addr
7569 +                               , sc->sc_spring[ix].pd_size
7570 +                               , sc->sc_spring[ix].pd_flags
7571 +                       );
7572 +                       if (sc->sc_spring[ix].pd_size == 0)
7573 +                               printf(" (zero!)");
7574 +                       printf("\n");
7575 +                       if (++ix == SAFE_TOTAL_SPART)
7576 +                               ix = 0;
7577 +               }
7578 +       }
7579 +       if (re->re_dst.nsegs > 1) {
7580 +               ix = (re->re_desc.d_dst - sc->sc_dpalloc.dma_paddr) /
7581 +                       sizeof(struct safe_pdesc);
7582 +               for (nsegs = re->re_dst.nsegs; nsegs; nsegs--) {
7583 +                       printf(" dpd[%u] %p: %p flags %x\n"
7584 +                               , ix, &sc->sc_dpring[ix]
7585 +                               , (caddr_t)(uintptr_t) sc->sc_dpring[ix].pd_addr
7586 +                               , sc->sc_dpring[ix].pd_flags
7587 +                       );
7588 +                       if (++ix == SAFE_TOTAL_DPART)
7589 +                               ix = 0;
7590 +               }
7591 +       }
7592 +       printf("sa: cmd0 %08x cmd1 %08x staterec %x\n",
7593 +               re->re_sa.sa_cmd0, re->re_sa.sa_cmd1, re->re_sa.sa_staterec);
7594 +       printf("sa: key %x %x %x %x %x %x %x %x\n"
7595 +               , re->re_sa.sa_key[0]
7596 +               , re->re_sa.sa_key[1]
7597 +               , re->re_sa.sa_key[2]
7598 +               , re->re_sa.sa_key[3]
7599 +               , re->re_sa.sa_key[4]
7600 +               , re->re_sa.sa_key[5]
7601 +               , re->re_sa.sa_key[6]
7602 +               , re->re_sa.sa_key[7]
7603 +       );
7604 +       printf("sa: indigest %x %x %x %x %x\n"
7605 +               , re->re_sa.sa_indigest[0]
7606 +               , re->re_sa.sa_indigest[1]
7607 +               , re->re_sa.sa_indigest[2]
7608 +               , re->re_sa.sa_indigest[3]
7609 +               , re->re_sa.sa_indigest[4]
7610 +       );
7611 +       printf("sa: outdigest %x %x %x %x %x\n"
7612 +               , re->re_sa.sa_outdigest[0]
7613 +               , re->re_sa.sa_outdigest[1]
7614 +               , re->re_sa.sa_outdigest[2]
7615 +               , re->re_sa.sa_outdigest[3]
7616 +               , re->re_sa.sa_outdigest[4]
7617 +       );
7618 +       printf("sr: iv %x %x %x %x\n"
7619 +               , re->re_sastate.sa_saved_iv[0]
7620 +               , re->re_sastate.sa_saved_iv[1]
7621 +               , re->re_sastate.sa_saved_iv[2]
7622 +               , re->re_sastate.sa_saved_iv[3]
7623 +       );
7624 +       printf("sr: hashbc %u indigest %x %x %x %x %x\n"
7625 +               , re->re_sastate.sa_saved_hashbc
7626 +               , re->re_sastate.sa_saved_indigest[0]
7627 +               , re->re_sastate.sa_saved_indigest[1]
7628 +               , re->re_sastate.sa_saved_indigest[2]
7629 +               , re->re_sastate.sa_saved_indigest[3]
7630 +               , re->re_sastate.sa_saved_indigest[4]
7631 +       );
7632 +}
7633 +
7634 +static void
7635 +safe_dump_ring(struct safe_softc *sc, const char *tag)
7636 +{
7637 +       unsigned long flags;
7638 +
7639 +       spin_lock_irqsave(&sc->sc_ringmtx, flags);
7640 +       printf("\nSafeNet Ring State:\n");
7641 +       safe_dump_intrstate(sc, tag);
7642 +       safe_dump_dmastatus(sc, tag);
7643 +       safe_dump_ringstate(sc, tag);
7644 +       if (sc->sc_nqchip) {
7645 +               struct safe_ringentry *re = sc->sc_back;
7646 +               do {
7647 +                       safe_dump_request(sc, tag, re);
7648 +                       if (++re == sc->sc_ringtop)
7649 +                               re = sc->sc_ring;
7650 +               } while (re != sc->sc_front);
7651 +       }
7652 +       spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
7653 +}
7654 +#endif /* SAFE_DEBUG */
7655 +
7656 +
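+/*
+ * PCI probe: map the register window, set 32-bit DMA masks, hook the
+ * shared interrupt, allocate the packet engine ring and the source and
+ * destination particle rings, register the device and its algorithms
+ * with OCF, and finally reset and initialize the hardware (including
+ * the RNG when present).
+ */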
7657 +static int safe_probe(struct pci_dev *dev, const struct pci_device_id *ent)
7658 +{
7659 +       struct safe_softc *sc = NULL;
7660 +       u32 mem_start, mem_len, cmd;
7661 +       int i, rc, devinfo;
7662 +       dma_addr_t raddr;
7663 +       static int num_chips = 0;
7664 +
7665 +       DPRINTF(("%s()\n", __FUNCTION__));
7666 +
7667 +       if (pci_enable_device(dev) < 0)
7668 +               return(-ENODEV);
7669 +
7670 +       if (!dev->irq) {
7671 +               printk("safe: found device with no IRQ assigned. check BIOS settings!");
7672 +               pci_disable_device(dev);
7673 +               return(-ENODEV);
7674 +       }
7675 +
7676 +       if (pci_set_mwi(dev)) {
7677 +               printk("safe: pci_set_mwi failed!");
7678 +               return(-ENODEV);
7679 +       }
7680 +
7681 +       sc = (struct safe_softc *) kmalloc(sizeof(*sc), GFP_KERNEL);
7682 +       if (!sc)
7683 +               return(-ENOMEM);
7684 +       memset(sc, 0, sizeof(*sc));
7685 +
7686 +       softc_device_init(sc, "safe", num_chips, safe_methods);
7687 +
7688 +       sc->sc_irq = -1;
7689 +       sc->sc_cid = -1;
7690 +       sc->sc_pcidev = dev;
7691 +       if (num_chips < SAFE_MAX_CHIPS) {
7692 +               safe_chip_idx[device_get_unit(sc->sc_dev)] = sc;
7693 +               num_chips++;
7694 +       }
7695 +
7696 +       INIT_LIST_HEAD(&sc->sc_pkq);
7697 +       spin_lock_init(&sc->sc_pkmtx);
7698 +
7699 +       pci_set_drvdata(sc->sc_pcidev, sc);
7700 +
7701 +       /* we read its hardware registers as memory */
7702 +       mem_start = pci_resource_start(sc->sc_pcidev, 0);
7703 +       mem_len   = pci_resource_len(sc->sc_pcidev, 0);
7704 +
7705 +       sc->sc_base_addr = (ocf_iomem_t) ioremap(mem_start, mem_len);
7706 +       if (!sc->sc_base_addr) {
7707 +               device_printf(sc->sc_dev, "failed to ioremap 0x%x-0x%x\n",
7708 +                               mem_start, mem_start + mem_len - 1);
7709 +               goto out;
7710 +       }
7711 +
7712 +       /* fix up the bus size */
7713 +       if (pci_set_dma_mask(sc->sc_pcidev, DMA_32BIT_MASK)) {
7714 +               device_printf(sc->sc_dev, "No usable DMA configuration, aborting.\n");
7715 +               goto out;
7716 +       }
7717 +       if (pci_set_consistent_dma_mask(sc->sc_pcidev, DMA_32BIT_MASK)) {
7718 +               device_printf(sc->sc_dev, "No usable consistent DMA configuration, aborting.\n");
7719 +               goto out;
7720 +       }
7721 +
7722 +       pci_set_master(sc->sc_pcidev);
7723 +
7724 +       pci_read_config_dword(sc->sc_pcidev, PCI_COMMAND, &cmd);
7725 +
7726 +       if (!(cmd & PCI_COMMAND_MEMORY)) {
7727 +               device_printf(sc->sc_dev, "failed to enable memory mapping\n");
7728 +               goto out;
7729 +       }
7730 +
7731 +       if (!(cmd & PCI_COMMAND_MASTER)) {
7732 +               device_printf(sc->sc_dev, "failed to enable bus mastering\n");
7733 +               goto out;
7734 +       }
7735 +
7736 +       rc = request_irq(dev->irq, safe_intr, IRQF_SHARED, "safe", sc);
7737 +       if (rc) {
7738 +               device_printf(sc->sc_dev, "failed to hook irq %d\n", sc->sc_irq);
7739 +               goto out;
7740 +       }
7741 +       sc->sc_irq = dev->irq;
7742 +
7743 +       sc->sc_chiprev = READ_REG(sc, SAFE_DEVINFO) &
7744 +                       (SAFE_DEVINFO_REV_MAJ | SAFE_DEVINFO_REV_MIN);
7745 +
7746 +       /*
7747 +        * Allocate packet engine descriptors.
7748 +        */
7749 +       sc->sc_ringalloc.dma_vaddr = pci_alloc_consistent(sc->sc_pcidev,
7750 +                       SAFE_MAX_NQUEUE * sizeof (struct safe_ringentry),
7751 +                       &sc->sc_ringalloc.dma_paddr);
7752 +       if (!sc->sc_ringalloc.dma_vaddr) {
7753 +               device_printf(sc->sc_dev, "cannot allocate PE descriptor ring\n");
7754 +               goto out;
7755 +       }
7756 +
7757 +       /*
7758 +        * Hookup the static portion of all our data structures.
7759 +        */
7760 +       sc->sc_ring = (struct safe_ringentry *) sc->sc_ringalloc.dma_vaddr;
7761 +       sc->sc_ringtop = sc->sc_ring + SAFE_MAX_NQUEUE;
7762 +       sc->sc_front = sc->sc_ring;
7763 +       sc->sc_back = sc->sc_ring;
7764 +       raddr = sc->sc_ringalloc.dma_paddr;
7765 +       bzero(sc->sc_ring, SAFE_MAX_NQUEUE * sizeof(struct safe_ringentry));
7766 +       for (i = 0; i < SAFE_MAX_NQUEUE; i++) {
7767 +               struct safe_ringentry *re = &sc->sc_ring[i];
7768 +
7769 +               re->re_desc.d_sa = raddr +
7770 +                       offsetof(struct safe_ringentry, re_sa);
7771 +               re->re_sa.sa_staterec = raddr +
7772 +                       offsetof(struct safe_ringentry, re_sastate);
7773 +
7774 +               raddr += sizeof (struct safe_ringentry);
7775 +       }
7776 +       spin_lock_init(&sc->sc_ringmtx);
7777 +
7778 +       /*
7779 +        * Allocate scatter and gather particle descriptors.
7780 +        */
7781 +       sc->sc_spalloc.dma_vaddr = pci_alloc_consistent(sc->sc_pcidev,
7782 +                       SAFE_TOTAL_SPART * sizeof (struct safe_pdesc),
7783 +                       &sc->sc_spalloc.dma_paddr);
7784 +       if (!sc->sc_spalloc.dma_vaddr) {
7785 +               device_printf(sc->sc_dev, "cannot allocate source particle descriptor ring\n");
7786 +               goto out;
7787 +       }
7788 +       sc->sc_spring = (struct safe_pdesc *) sc->sc_spalloc.dma_vaddr;
7789 +       sc->sc_springtop = sc->sc_spring + SAFE_TOTAL_SPART;
7790 +       sc->sc_spfree = sc->sc_spring;
7791 +       bzero(sc->sc_spring, SAFE_TOTAL_SPART * sizeof(struct safe_pdesc));
7792 +
7793 +       sc->sc_dpalloc.dma_vaddr = pci_alloc_consistent(sc->sc_pcidev,
7794 +                       SAFE_TOTAL_DPART * sizeof (struct safe_pdesc),
7795 +                       &sc->sc_dpalloc.dma_paddr);
7796 +       if (!sc->sc_dpalloc.dma_vaddr) {
7797 +               device_printf(sc->sc_dev, "cannot allocate destination particle descriptor ring\n");
7798 +               goto out;
7799 +       }
7800 +       sc->sc_dpring = (struct safe_pdesc *) sc->sc_dpalloc.dma_vaddr;
7801 +       sc->sc_dpringtop = sc->sc_dpring + SAFE_TOTAL_DPART;
7802 +       sc->sc_dpfree = sc->sc_dpring;
7803 +       bzero(sc->sc_dpring, SAFE_TOTAL_DPART * sizeof(struct safe_pdesc));
7804 +
7805 +       sc->sc_cid = crypto_get_driverid(softc_get_device(sc), CRYPTOCAP_F_HARDWARE);
7806 +       if (sc->sc_cid < 0) {
7807 +               device_printf(sc->sc_dev, "could not get crypto driver id\n");
7808 +               goto out;
7809 +       }
7810 +
7811 +       printf("%s:", device_get_nameunit(sc->sc_dev));
7812 +
7813 +       devinfo = READ_REG(sc, SAFE_DEVINFO);
7814 +       if (devinfo & SAFE_DEVINFO_RNG) {
7815 +               sc->sc_flags |= SAFE_FLAGS_RNG;
7816 +               printf(" rng");
7817 +       }
7818 +       if (devinfo & SAFE_DEVINFO_PKEY) {
7819 +               printf(" key");
7820 +               sc->sc_flags |= SAFE_FLAGS_KEY;
7821 +               crypto_kregister(sc->sc_cid, CRK_MOD_EXP, 0);
7822 +#if 0
7823 +               crypto_kregister(sc->sc_cid, CRK_MOD_EXP_CRT, 0);
7824 +#endif
7825 +               init_timer(&sc->sc_pkto);
7826 +               sc->sc_pkto.function = safe_kpoll;
7827 +               sc->sc_pkto.data = (unsigned long) device_get_unit(sc->sc_dev);
7828 +       }
7829 +       if (devinfo & SAFE_DEVINFO_DES) {
7830 +               printf(" des/3des");
7831 +               crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
7832 +               crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
7833 +       }
7834 +       if (devinfo & SAFE_DEVINFO_AES) {
7835 +               printf(" aes");
7836 +               crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
7837 +       }
7838 +       if (devinfo & SAFE_DEVINFO_MD5) {
7839 +               printf(" md5");
7840 +               crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
7841 +       }
7842 +       if (devinfo & SAFE_DEVINFO_SHA1) {
7843 +               printf(" sha1");
7844 +               crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
7845 +       }
7846 +       printf(" null");
7847 +       crypto_register(sc->sc_cid, CRYPTO_NULL_CBC, 0, 0);
7848 +       crypto_register(sc->sc_cid, CRYPTO_NULL_HMAC, 0, 0);
7849 +       /* XXX other supported algorithms */
7850 +       printf("\n");
7851 +
7852 +       safe_reset_board(sc);           /* reset h/w */
7853 +       safe_init_board(sc);            /* init h/w */
7854 +
7855 +#if defined(CONFIG_OCF_RANDOMHARVEST) && !defined(SAFE_NO_RNG)
7856 +       if (sc->sc_flags & SAFE_FLAGS_RNG) {
7857 +               safe_rng_init(sc);
7858 +               crypto_rregister(sc->sc_cid, safe_read_random, sc);
7859 +       }
7860 +#endif /* defined(CONFIG_OCF_RANDOMHARVEST) && !defined(SAFE_NO_RNG) */
7861 +
7862 +       return (0);
7863 +
7864 +out:
7865 +       if (sc->sc_cid >= 0)
7866 +               crypto_unregister_all(sc->sc_cid);
7867 +       if (sc->sc_irq != -1)
7868 +               free_irq(sc->sc_irq, sc);
7869 +       if (sc->sc_ringalloc.dma_vaddr)
7870 +               pci_free_consistent(sc->sc_pcidev,
7871 +                               SAFE_MAX_NQUEUE * sizeof (struct safe_ringentry),
7872 +                               sc->sc_ringalloc.dma_vaddr, sc->sc_ringalloc.dma_paddr);
7873 +       if (sc->sc_spalloc.dma_vaddr)
7874 +               pci_free_consistent(sc->sc_pcidev,
7875 +                               SAFE_TOTAL_SPART * sizeof (struct safe_pdesc),
7876 +                               sc->sc_spalloc.dma_vaddr, sc->sc_spalloc.dma_paddr);
7877 +       if (sc->sc_dpalloc.dma_vaddr)
7878 +               pci_free_consistent(sc->sc_pcidev,
7879 +                               SAFE_TOTAL_DPART * sizeof (struct safe_pdesc),
7880 +                               sc->sc_dpalloc.dma_vaddr, sc->sc_dpalloc.dma_paddr);
7881 +       kfree(sc);
7882 +       return(-ENODEV);
7883 +}
7884 +
7885 +static void safe_remove(struct pci_dev *dev)
7886 +{
7887 +       struct safe_softc *sc = pci_get_drvdata(dev);
7888 +
7889 +       DPRINTF(("%s()\n", __FUNCTION__));
7890 +
7891 +       /* XXX wait/abort active ops */
7892 +
7893 +       WRITE_REG(sc, SAFE_HI_MASK, 0);         /* disable interrupts */
7894 +
7895 +       del_timer_sync(&sc->sc_pkto);
7896 +
7897 +       crypto_unregister_all(sc->sc_cid);
7898 +
7899 +       safe_cleanchip(sc);
7900 +
7901 +       if (sc->sc_irq != -1)
7902 +               free_irq(sc->sc_irq, sc);
7903 +       if (sc->sc_ringalloc.dma_vaddr)
7904 +               pci_free_consistent(sc->sc_pcidev,
7905 +                               SAFE_MAX_NQUEUE * sizeof (struct safe_ringentry),
7906 +                               sc->sc_ringalloc.dma_vaddr, sc->sc_ringalloc.dma_paddr);
7907 +       if (sc->sc_spalloc.dma_vaddr)
7908 +               pci_free_consistent(sc->sc_pcidev,
7909 +                               SAFE_TOTAL_SPART * sizeof (struct safe_pdesc),
7910 +                               sc->sc_spalloc.dma_vaddr, sc->sc_spalloc.dma_paddr);
7911 +       if (sc->sc_dpalloc.dma_vaddr)
7912 +               pci_free_consistent(sc->sc_pcidev,
7913 +                               SAFE_TOTAL_DPART * sizeof (struct safe_pdesc),
7914 +                               sc->sc_dpalloc.dma_vaddr, sc->sc_dpalloc.dma_paddr);
7915 +       sc->sc_irq = -1;
7916 +       sc->sc_ringalloc.dma_vaddr = NULL;
7917 +       sc->sc_spalloc.dma_vaddr = NULL;
7918 +       sc->sc_dpalloc.dma_vaddr = NULL;
7919 +}
7920 +
7921 +static struct pci_device_id safe_pci_tbl[] = {
7922 +       { PCI_VENDOR_SAFENET, PCI_PRODUCT_SAFEXCEL,
7923 +         PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
7924 +       { },
7925 +};
7926 +MODULE_DEVICE_TABLE(pci, safe_pci_tbl);
7927 +
7928 +static struct pci_driver safe_driver = {
7929 +       .name         = "safe",
7930 +       .id_table     = safe_pci_tbl,
7931 +       .probe        = safe_probe,
7932 +       .remove       = safe_remove,
7933 +       /* add PM stuff here one day */
7934 +};
7935 +
7936 +static int __init safe_init (void)
7937 +{
7938 +       struct safe_softc *sc = NULL;
7939 +       int rc;
7940 +
7941 +       DPRINTF(("%s(%p)\n", __FUNCTION__, safe_init));
7942 +
7943 +       rc = pci_register_driver(&safe_driver);
7944 +       pci_register_driver_compat(&safe_driver, rc);
7945 +
7946 +       return rc;
7947 +}
7948 +
7949 +static void __exit safe_exit (void)
7950 +{
7951 +       pci_unregister_driver(&safe_driver);
7952 +}
7953 +
7954 +module_init(safe_init);
7955 +module_exit(safe_exit);
7956 +
7957 +MODULE_LICENSE("BSD");
7958 +MODULE_AUTHOR("David McCullough <david_mccullough@securecomputing.com>");
7959 +MODULE_DESCRIPTION("OCF driver for SafeNet PCI crypto devices");
7960 --- /dev/null   2007-07-04 13:54:27.000000000 +1000
7961 +++ linux/crypto/ocf/safe/sha1.c        2005-05-20 10:30:53.000000000 +1000
7962 @@ -0,0 +1,279 @@
7963 +/*     $KAME: sha1.c,v 1.5 2000/11/08 06:13:08 itojun Exp $    */
7964 +/*
7965 + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
7966 + * All rights reserved.
7967 + *
7968 + * Redistribution and use in source and binary forms, with or without
7969 + * modification, are permitted provided that the following conditions
7970 + * are met:
7971 + * 1. Redistributions of source code must retain the above copyright
7972 + *    notice, this list of conditions and the following disclaimer.
7973 + * 2. Redistributions in binary form must reproduce the above copyright
7974 + *    notice, this list of conditions and the following disclaimer in the
7975 + *    documentation and/or other materials provided with the distribution.
7976 + * 3. Neither the name of the project nor the names of its contributors
7977 + *    may be used to endorse or promote products derived from this software
7978 + *    without specific prior written permission.
7979 + *
7980 + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
7981 + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
7982 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
7983 + * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
7984 + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
7985 + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
7986 + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
7987 + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
7988 + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
7989 + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
7990 + * SUCH DAMAGE.
7991 + */
7992 +
7993 +/*
7994 + * FIPS pub 180-1: Secure Hash Algorithm (SHA-1)
7995 + * based on: http://csrc.nist.gov/fips/fip180-1.txt
7996 + * implemented by Jun-ichiro itojun Itoh <itojun@itojun.org>
7997 + */
7998 +
7999 +#if 0
8000 +#include <sys/cdefs.h>
8001 +__FBSDID("$FreeBSD: src/sys/crypto/sha1.c,v 1.9 2003/06/10 21:36:57 obrien Exp $");
8002 +
8003 +#include <sys/types.h>
8004 +#include <sys/cdefs.h>
8005 +#include <sys/time.h>
8006 +#include <sys/systm.h>
8007 +
8008 +#include <crypto/sha1.h>
8009 +#endif
8010 +
8011 +/* sanity check */
8012 +#if BYTE_ORDER != BIG_ENDIAN
8013 +# if BYTE_ORDER != LITTLE_ENDIAN
8014 +#  define unsupported 1
8015 +# endif
8016 +#endif
8017 +
8018 +#ifndef unsupported
8019 +
8020 +/* constant table */
8021 +static u_int32_t _K[] = { 0x5a827999, 0x6ed9eba1, 0x8f1bbcdc, 0xca62c1d6 };
8022 +#define        K(t)    _K[(t) / 20]
8023 +
8024 +#define        F0(b, c, d)     (((b) & (c)) | ((~(b)) & (d)))
8025 +#define        F1(b, c, d)     (((b) ^ (c)) ^ (d))
8026 +#define        F2(b, c, d)     (((b) & (c)) | ((b) & (d)) | ((c) & (d)))
8027 +#define        F3(b, c, d)     (((b) ^ (c)) ^ (d))
8028 +
8029 +#define        S(n, x)         (((x) << (n)) | ((x) >> (32 - n)))
8030 +
8031 +#undef H
8032 +#define        H(n)    (ctxt->h.b32[(n)])
8033 +#define        COUNT   (ctxt->count)
8034 +#define        BCOUNT  (ctxt->c.b64[0] / 8)
8035 +#define        W(n)    (ctxt->m.b32[(n)])
8036 +
8037 +#define        PUTBYTE(x)      { \
8038 +       ctxt->m.b8[(COUNT % 64)] = (x);         \
8039 +       COUNT++;                                \
8040 +       COUNT %= 64;                            \
8041 +       ctxt->c.b64[0] += 8;                    \
8042 +       if (COUNT % 64 == 0)                    \
8043 +               sha1_step(ctxt);                \
8044 +     }
8045 +
8046 +#define        PUTPAD(x)       { \
8047 +       ctxt->m.b8[(COUNT % 64)] = (x);         \
8048 +       COUNT++;                                \
8049 +       COUNT %= 64;                            \
8050 +       if (COUNT % 64 == 0)                    \
8051 +               sha1_step(ctxt);                \
8052 +     }
8053 +
8054 +static void sha1_step(struct sha1_ctxt *);
8055 +
8056 +static void
8057 +sha1_step(ctxt)
8058 +       struct sha1_ctxt *ctxt;
8059 +{
8060 +       u_int32_t       a, b, c, d, e;
8061 +       size_t t, s;
8062 +       u_int32_t       tmp;
8063 +
8064 +#if BYTE_ORDER == LITTLE_ENDIAN
8065 +       struct sha1_ctxt tctxt;
8066 +       bcopy(&ctxt->m.b8[0], &tctxt.m.b8[0], 64);
8067 +       ctxt->m.b8[0] = tctxt.m.b8[3]; ctxt->m.b8[1] = tctxt.m.b8[2];
8068 +       ctxt->m.b8[2] = tctxt.m.b8[1]; ctxt->m.b8[3] = tctxt.m.b8[0];
8069 +       ctxt->m.b8[4] = tctxt.m.b8[7]; ctxt->m.b8[5] = tctxt.m.b8[6];
8070 +       ctxt->m.b8[6] = tctxt.m.b8[5]; ctxt->m.b8[7] = tctxt.m.b8[4];
8071 +       ctxt->m.b8[8] = tctxt.m.b8[11]; ctxt->m.b8[9] = tctxt.m.b8[10];
8072 +       ctxt->m.b8[10] = tctxt.m.b8[9]; ctxt->m.b8[11] = tctxt.m.b8[8];
8073 +       ctxt->m.b8[12] = tctxt.m.b8[15]; ctxt->m.b8[13] = tctxt.m.b8[14];
8074 +       ctxt->m.b8[14] = tctxt.m.b8[13]; ctxt->m.b8[15] = tctxt.m.b8[12];
8075 +       ctxt->m.b8[16] = tctxt.m.b8[19]; ctxt->m.b8[17] = tctxt.m.b8[18];
8076 +       ctxt->m.b8[18] = tctxt.m.b8[17]; ctxt->m.b8[19] = tctxt.m.b8[16];
8077 +       ctxt->m.b8[20] = tctxt.m.b8[23]; ctxt->m.b8[21] = tctxt.m.b8[22];
8078 +       ctxt->m.b8[22] = tctxt.m.b8[21]; ctxt->m.b8[23] = tctxt.m.b8[20];
8079 +       ctxt->m.b8[24] = tctxt.m.b8[27]; ctxt->m.b8[25] = tctxt.m.b8[26];
8080 +       ctxt->m.b8[26] = tctxt.m.b8[25]; ctxt->m.b8[27] = tctxt.m.b8[24];
8081 +       ctxt->m.b8[28] = tctxt.m.b8[31]; ctxt->m.b8[29] = tctxt.m.b8[30];
8082 +       ctxt->m.b8[30] = tctxt.m.b8[29]; ctxt->m.b8[31] = tctxt.m.b8[28];
8083 +       ctxt->m.b8[32] = tctxt.m.b8[35]; ctxt->m.b8[33] = tctxt.m.b8[34];
8084 +       ctxt->m.b8[34] = tctxt.m.b8[33]; ctxt->m.b8[35] = tctxt.m.b8[32];
8085 +       ctxt->m.b8[36] = tctxt.m.b8[39]; ctxt->m.b8[37] = tctxt.m.b8[38];
8086 +       ctxt->m.b8[38] = tctxt.m.b8[37]; ctxt->m.b8[39] = tctxt.m.b8[36];
8087 +       ctxt->m.b8[40] = tctxt.m.b8[43]; ctxt->m.b8[41] = tctxt.m.b8[42];
8088 +       ctxt->m.b8[42] = tctxt.m.b8[41]; ctxt->m.b8[43] = tctxt.m.b8[40];
8089 +       ctxt->m.b8[44] = tctxt.m.b8[47]; ctxt->m.b8[45] = tctxt.m.b8[46];
8090 +       ctxt->m.b8[46] = tctxt.m.b8[45]; ctxt->m.b8[47] = tctxt.m.b8[44];
8091 +       ctxt->m.b8[48] = tctxt.m.b8[51]; ctxt->m.b8[49] = tctxt.m.b8[50];
8092 +       ctxt->m.b8[50] = tctxt.m.b8[49]; ctxt->m.b8[51] = tctxt.m.b8[48];
8093 +       ctxt->m.b8[52] = tctxt.m.b8[55]; ctxt->m.b8[53] = tctxt.m.b8[54];
8094 +       ctxt->m.b8[54] = tctxt.m.b8[53]; ctxt->m.b8[55] = tctxt.m.b8[52];
8095 +       ctxt->m.b8[56] = tctxt.m.b8[59]; ctxt->m.b8[57] = tctxt.m.b8[58];
8096 +       ctxt->m.b8[58] = tctxt.m.b8[57]; ctxt->m.b8[59] = tctxt.m.b8[56];
8097 +       ctxt->m.b8[60] = tctxt.m.b8[63]; ctxt->m.b8[61] = tctxt.m.b8[62];
8098 +       ctxt->m.b8[62] = tctxt.m.b8[61]; ctxt->m.b8[63] = tctxt.m.b8[60];
8099 +#endif
8100 +
8101 +       a = H(0); b = H(1); c = H(2); d = H(3); e = H(4);
8102 +
8103 +       for (t = 0; t < 20; t++) {
8104 +               s = t & 0x0f;
8105 +               if (t >= 16) {
8106 +                       W(s) = S(1, W((s+13) & 0x0f) ^ W((s+8) & 0x0f) ^ W((s+2) & 0x0f) ^ W(s));
8107 +               }
8108 +               tmp = S(5, a) + F0(b, c, d) + e + W(s) + K(t);
8109 +               e = d; d = c; c = S(30, b); b = a; a = tmp;
8110 +       }
8111 +       for (t = 20; t < 40; t++) {
8112 +               s = t & 0x0f;
8113 +               W(s) = S(1, W((s+13) & 0x0f) ^ W((s+8) & 0x0f) ^ W((s+2) & 0x0f) ^ W(s));
8114 +               tmp = S(5, a) + F1(b, c, d) + e + W(s) + K(t);
8115 +               e = d; d = c; c = S(30, b); b = a; a = tmp;
8116 +       }
8117 +       for (t = 40; t < 60; t++) {
8118 +               s = t & 0x0f;
8119 +               W(s) = S(1, W((s+13) & 0x0f) ^ W((s+8) & 0x0f) ^ W((s+2) & 0x0f) ^ W(s));
8120 +               tmp = S(5, a) + F2(b, c, d) + e + W(s) + K(t);
8121 +               e = d; d = c; c = S(30, b); b = a; a = tmp;
8122 +       }
8123 +       for (t = 60; t < 80; t++) {
8124 +               s = t & 0x0f;
8125 +               W(s) = S(1, W((s+13) & 0x0f) ^ W((s+8) & 0x0f) ^ W((s+2) & 0x0f) ^ W(s));
8126 +               tmp = S(5, a) + F3(b, c, d) + e + W(s) + K(t);
8127 +               e = d; d = c; c = S(30, b); b = a; a = tmp;
8128 +       }
8129 +
8130 +       H(0) = H(0) + a;
8131 +       H(1) = H(1) + b;
8132 +       H(2) = H(2) + c;
8133 +       H(3) = H(3) + d;
8134 +       H(4) = H(4) + e;
8135 +
8136 +       bzero(&ctxt->m.b8[0], 64);
8137 +}
8138 +
8139 +/*------------------------------------------------------------*/
8140 +
8141 +void
8142 +sha1_init(ctxt)
8143 +       struct sha1_ctxt *ctxt;
8144 +{
8145 +       bzero(ctxt, sizeof(struct sha1_ctxt));
8146 +       H(0) = 0x67452301;
8147 +       H(1) = 0xefcdab89;
8148 +       H(2) = 0x98badcfe;
8149 +       H(3) = 0x10325476;
8150 +       H(4) = 0xc3d2e1f0;
8151 +}
8152 +
8153 +void
8154 +sha1_pad(ctxt)
8155 +       struct sha1_ctxt *ctxt;
8156 +{
8157 +       size_t padlen;          /*pad length in bytes*/
8158 +       size_t padstart;
8159 +
8160 +       PUTPAD(0x80);
8161 +
8162 +       padstart = COUNT % 64;
8163 +       padlen = 64 - padstart;
8164 +       if (padlen < 8) {
8165 +               bzero(&ctxt->m.b8[padstart], padlen);
8166 +               COUNT += padlen;
8167 +               COUNT %= 64;
8168 +               sha1_step(ctxt);
8169 +               padstart = COUNT % 64;  /* should be 0 */
8170 +               padlen = 64 - padstart; /* should be 64 */
8171 +       }
8172 +       bzero(&ctxt->m.b8[padstart], padlen - 8);
8173 +       COUNT += (padlen - 8);
8174 +       COUNT %= 64;
8175 +#if BYTE_ORDER == BIG_ENDIAN
8176 +       PUTPAD(ctxt->c.b8[0]); PUTPAD(ctxt->c.b8[1]);
8177 +       PUTPAD(ctxt->c.b8[2]); PUTPAD(ctxt->c.b8[3]);
8178 +       PUTPAD(ctxt->c.b8[4]); PUTPAD(ctxt->c.b8[5]);
8179 +       PUTPAD(ctxt->c.b8[6]); PUTPAD(ctxt->c.b8[7]);
8180 +#else
8181 +       PUTPAD(ctxt->c.b8[7]); PUTPAD(ctxt->c.b8[6]);
8182 +       PUTPAD(ctxt->c.b8[5]); PUTPAD(ctxt->c.b8[4]);
8183 +       PUTPAD(ctxt->c.b8[3]); PUTPAD(ctxt->c.b8[2]);
8184 +       PUTPAD(ctxt->c.b8[1]); PUTPAD(ctxt->c.b8[0]);
8185 +#endif
8186 +}
8187 +
8188 +void
8189 +sha1_loop(ctxt, input, len)
8190 +       struct sha1_ctxt *ctxt;
8191 +       const u_int8_t *input;
8192 +       size_t len;
8193 +{
8194 +       size_t gaplen;
8195 +       size_t gapstart;
8196 +       size_t off;
8197 +       size_t copysiz;
8198 +
8199 +       off = 0;
8200 +
8201 +       while (off < len) {
8202 +               gapstart = COUNT % 64;
8203 +               gaplen = 64 - gapstart;
8204 +
8205 +               copysiz = (gaplen < len - off) ? gaplen : len - off;
8206 +               bcopy(&input[off], &ctxt->m.b8[gapstart], copysiz);
8207 +               COUNT += copysiz;
8208 +               COUNT %= 64;
8209 +               ctxt->c.b64[0] += copysiz * 8;
8210 +               if (COUNT % 64 == 0)
8211 +                       sha1_step(ctxt);
8212 +               off += copysiz;
8213 +       }
8214 +}
8215 +
8216 +void
8217 +sha1_result(ctxt, digest0)
8218 +       struct sha1_ctxt *ctxt;
8219 +       caddr_t digest0;
8220 +{
8221 +       u_int8_t *digest;
8222 +
8223 +       digest = (u_int8_t *)digest0;
8224 +       sha1_pad(ctxt);
8225 +#if BYTE_ORDER == BIG_ENDIAN
8226 +       bcopy(&ctxt->h.b8[0], digest, 20);
8227 +#else
8228 +       digest[0] = ctxt->h.b8[3]; digest[1] = ctxt->h.b8[2];
8229 +       digest[2] = ctxt->h.b8[1]; digest[3] = ctxt->h.b8[0];
8230 +       digest[4] = ctxt->h.b8[7]; digest[5] = ctxt->h.b8[6];
8231 +       digest[6] = ctxt->h.b8[5]; digest[7] = ctxt->h.b8[4];
8232 +       digest[8] = ctxt->h.b8[11]; digest[9] = ctxt->h.b8[10];
8233 +       digest[10] = ctxt->h.b8[9]; digest[11] = ctxt->h.b8[8];
8234 +       digest[12] = ctxt->h.b8[15]; digest[13] = ctxt->h.b8[14];
8235 +       digest[14] = ctxt->h.b8[13]; digest[15] = ctxt->h.b8[12];
8236 +       digest[16] = ctxt->h.b8[19]; digest[17] = ctxt->h.b8[18];
8237 +       digest[18] = ctxt->h.b8[17]; digest[19] = ctxt->h.b8[16];
8238 +#endif
8239 +}
8240 +
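+/*
+ * A minimal usage sketch of the API above, assuming caller-supplied
+ * `data' and `datalen':
+ *
+ *     struct sha1_ctxt ctx;
+ *     u_int8_t digest[20];
+ *
+ *     sha1_init(&ctx);
+ *     sha1_loop(&ctx, data, datalen);
+ *     sha1_result(&ctx, (caddr_t)digest);
+ */
+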
8241 +#endif /*unsupported*/
8242 --- /dev/null   2007-07-04 13:54:27.000000000 +1000
8243 +++ linux/crypto/ocf/safe/sha1.h        2005-05-20 10:30:53.000000000 +1000
8244 @@ -0,0 +1,72 @@
8245 +/*     $FreeBSD: src/sys/crypto/sha1.h,v 1.8 2002/03/20 05:13:50 alfred Exp $  */
8246 +/*     $KAME: sha1.h,v 1.5 2000/03/27 04:36:23 sumikawa Exp $  */
8247 +
8248 +/*
8249 + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
8250 + * All rights reserved.
8251 + *
8252 + * Redistribution and use in source and binary forms, with or without
8253 + * modification, are permitted provided that the following conditions
8254 + * are met:
8255 + * 1. Redistributions of source code must retain the above copyright
8256 + *    notice, this list of conditions and the following disclaimer.
8257 + * 2. Redistributions in binary form must reproduce the above copyright
8258 + *    notice, this list of conditions and the following disclaimer in the
8259 + *    documentation and/or other materials provided with the distribution.
8260 + * 3. Neither the name of the project nor the names of its contributors
8261 + *    may be used to endorse or promote products derived from this software
8262 + *    without specific prior written permission.
8263 + *
8264 + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
8265 + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
8266 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
8267 + * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
8268 + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
8269 + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
8270 + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
8271 + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
8272 + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
8273 + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
8274 + * SUCH DAMAGE.
8275 + */
8276 +/*
8277 + * FIPS pub 180-1: Secure Hash Algorithm (SHA-1)
8278 + * based on: http://csrc.nist.gov/fips/fip180-1.txt
8279 + * implemented by Jun-ichiro itojun Itoh <itojun@itojun.org>
8280 + */
8281 +
8282 +#ifndef _NETINET6_SHA1_H_
8283 +#define _NETINET6_SHA1_H_
8284 +
8285 +struct sha1_ctxt {
8286 +       union {
8287 +               u_int8_t        b8[20];
8288 +               u_int32_t       b32[5];
8289 +       } h;
8290 +       union {
8291 +               u_int8_t        b8[8];
8292 +               u_int64_t       b64[1];
8293 +       } c;
8294 +       union {
8295 +               u_int8_t        b8[64];
8296 +               u_int32_t       b32[16];
8297 +       } m;
8298 +       u_int8_t        count;
8299 +};
8300 +
8301 +#ifdef __KERNEL__
8302 +extern void sha1_init(struct sha1_ctxt *);
8303 +extern void sha1_pad(struct sha1_ctxt *);
8304 +extern void sha1_loop(struct sha1_ctxt *, const u_int8_t *, size_t);
8305 +extern void sha1_result(struct sha1_ctxt *, caddr_t);
8306 +
8307 +/* compatibility with other SHA1 source code */
8308 +typedef struct sha1_ctxt SHA1_CTX;
8309 +#define SHA1Init(x)            sha1_init((x))
8310 +#define SHA1Update(x, y, z)    sha1_loop((x), (y), (z))
8311 +#define SHA1Final(x, y)                sha1_result((y), (x))
8312 +#endif /* __KERNEL__ */
8313 +
8314 +#define        SHA1_RESULTLEN  (160/8)
8315 +
8316 +#endif /*_NETINET6_SHA1_H_*/
8317 --- /dev/null   2007-07-04 13:54:27.000000000 +1000
8318 +++ linux/crypto/ocf/safe/safereg.h     2005-03-16 15:19:57.000000000 +1000
8319 @@ -0,0 +1,421 @@
8320 +/*-
8321 + * Copyright (c) 2003 Sam Leffler, Errno Consulting
8322 + * Copyright (c) 2003 Global Technology Associates, Inc.
8323 + * All rights reserved.
8324 + *
8325 + * Redistribution and use in source and binary forms, with or without
8326 + * modification, are permitted provided that the following conditions
8327 + * are met:
8328 + * 1. Redistributions of source code must retain the above copyright
8329 + *    notice, this list of conditions and the following disclaimer.
8330 + * 2. Redistributions in binary form must reproduce the above copyright
8331 + *    notice, this list of conditions and the following disclaimer in the
8332 + *    documentation and/or other materials provided with the distribution.
8333 + *
8334 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
8335 + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
8336 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
8337 + * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
8338 + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
8339 + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
8340 + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
8341 + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
8342 + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
8343 + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
8344 + * SUCH DAMAGE.
8345 + *
8346 + * $FreeBSD: src/sys/dev/safe/safereg.h,v 1.1 2003/07/21 21:46:07 sam Exp $
8347 + */
8348 +#ifndef _SAFE_SAFEREG_H_
8349 +#define        _SAFE_SAFEREG_H_
8350 +
8351 +/*
8352 + * Register definitions for SafeNet SafeXcel-1141 crypto device.
8353 + * Definitions from revision 1.3 (Nov 6 2002) of the User's Manual.
8354 + */
8355 +
8356 +#define BS_BAR                 0x10    /* DMA base address register */
8357 +#define        BS_TRDY_TIMEOUT         0x40    /* TRDY timeout */
8358 +#define        BS_RETRY_TIMEOUT        0x41    /* DMA retry timeout */
8359 +
8360 +#define        PCI_VENDOR_SAFENET      0x16ae          /* SafeNet, Inc. */
8361 +
8362 +/* SafeNet */
8363 +#define        PCI_PRODUCT_SAFEXCEL    0x1141          /* 1141 */
8364 +
8365 +#define        SAFE_PE_CSR             0x0000  /* Packet Engine Ctrl/Status */
8366 +#define        SAFE_PE_SRC             0x0004  /* Packet Engine Source */
8367 +#define        SAFE_PE_DST             0x0008  /* Packet Engine Destination */
8368 +#define        SAFE_PE_SA              0x000c  /* Packet Engine SA */
8369 +#define        SAFE_PE_LEN             0x0010  /* Packet Engine Length */
8370 +#define        SAFE_PE_DMACFG          0x0040  /* Packet Engine DMA Configuration */
8371 +#define        SAFE_PE_DMASTAT         0x0044  /* Packet Engine DMA Status */
8372 +#define        SAFE_PE_PDRBASE         0x0048  /* Packet Engine Descriptor Ring Base */
8373 +#define        SAFE_PE_RDRBASE         0x004c  /* Packet Engine Result Ring Base */
8374 +#define        SAFE_PE_RINGCFG         0x0050  /* Packet Engine Ring Configuration */
8375 +#define        SAFE_PE_RINGPOLL        0x0054  /* Packet Engine Ring Poll */
8376 +#define        SAFE_PE_IRNGSTAT        0x0058  /* Packet Engine Internal Ring Status */
8377 +#define        SAFE_PE_ERNGSTAT        0x005c  /* Packet Engine External Ring Status */
8378 +#define        SAFE_PE_IOTHRESH        0x0060  /* Packet Engine I/O Threshold */
8379 +#define        SAFE_PE_GRNGBASE        0x0064  /* Packet Engine Gather Ring Base */
8380 +#define        SAFE_PE_SRNGBASE        0x0068  /* Packet Engine Scatter Ring Base */
8381 +#define        SAFE_PE_PARTSIZE        0x006c  /* Packet Engine Particle Ring Size */
8382 +#define        SAFE_PE_PARTCFG         0x0070  /* Packet Engine Particle Ring Config */
8383 +#define        SAFE_CRYPTO_CTRL        0x0080  /* Crypto Control */
8384 +#define        SAFE_DEVID              0x0084  /* Device ID */
8385 +#define        SAFE_DEVINFO            0x0088  /* Device Info */
8386 +#define        SAFE_HU_STAT            0x00a0  /* Host Unmasked Status */
8387 +#define        SAFE_HM_STAT            0x00a4  /* Host Masked Status (read-only) */
8388 +#define        SAFE_HI_CLR             0x00a4  /* Host Clear Interrupt (write-only) */
8389 +#define        SAFE_HI_MASK            0x00a8  /* Host Mask Control */
8390 +#define        SAFE_HI_CFG             0x00ac  /* Interrupt Configuration */
8391 +#define        SAFE_HI_RD_DESCR        0x00b4  /* Force Descriptor Read */
8392 +#define        SAFE_HI_DESC_CNT        0x00b8  /* Host Descriptor Done Count */
8393 +#define        SAFE_DMA_ENDIAN         0x00c0  /* Master Endian Status */
8394 +#define        SAFE_DMA_SRCADDR        0x00c4  /* DMA Source Address Status */
8395 +#define        SAFE_DMA_DSTADDR        0x00c8  /* DMA Destination Address Status */
8396 +#define        SAFE_DMA_STAT           0x00cc  /* DMA Current Status */
8397 +#define        SAFE_DMA_CFG            0x00d4  /* DMA Configuration/Status */
8398 +#define        SAFE_ENDIAN             0x00e0  /* Endian Configuration */
8399 +#define        SAFE_PK_A_ADDR          0x0800  /* Public Key A Address */
8400 +#define        SAFE_PK_B_ADDR          0x0804  /* Public Key B Address */
8401 +#define        SAFE_PK_C_ADDR          0x0808  /* Public Key C Address */
8402 +#define        SAFE_PK_D_ADDR          0x080c  /* Public Key D Address */
8403 +#define        SAFE_PK_A_LEN           0x0810  /* Public Key A Length */
8404 +#define        SAFE_PK_B_LEN           0x0814  /* Public Key B Length */
8405 +#define        SAFE_PK_SHIFT           0x0818  /* Public Key Shift */
8406 +#define        SAFE_PK_FUNC            0x081c  /* Public Key Function */
8407 +#define SAFE_PK_RAM_START      0x1000  /* Public Key RAM start address */
8408 +#define SAFE_PK_RAM_END                0x1fff  /* Public Key RAM end address */
8409 +
8410 +#define        SAFE_RNG_OUT            0x0100  /* RNG Output */
8411 +#define        SAFE_RNG_STAT           0x0104  /* RNG Status */
8412 +#define        SAFE_RNG_CTRL           0x0108  /* RNG Control */
8413 +#define        SAFE_RNG_A              0x010c  /* RNG A */
8414 +#define        SAFE_RNG_B              0x0110  /* RNG B */
8415 +#define        SAFE_RNG_X_LO           0x0114  /* RNG X [31:0] */
8416 +#define        SAFE_RNG_X_MID          0x0118  /* RNG X [63:32] */
8417 +#define        SAFE_RNG_X_HI           0x011c  /* RNG X [80:64] */
8418 +#define        SAFE_RNG_X_CNTR         0x0120  /* RNG Counter */
8419 +#define        SAFE_RNG_ALM_CNT        0x0124  /* RNG Alarm Count */
8420 +#define        SAFE_RNG_CNFG           0x0128  /* RNG Configuration */
8421 +#define        SAFE_RNG_LFSR1_LO       0x012c  /* RNG LFSR1 [31:0] */
8422 +#define        SAFE_RNG_LFSR1_HI       0x0130  /* RNG LFSR1 [47:32] */
8423 +#define        SAFE_RNG_LFSR2_LO       0x0134  /* RNG LFSR2 [31:0] */
8424 +#define        SAFE_RNG_LFSR2_HI       0x0138  /* RNG LFSR2 [47:32] */
8425 +
8426 +#define        SAFE_PE_CSR_READY       0x00000001      /* ready for processing */
8427 +#define        SAFE_PE_CSR_DONE        0x00000002      /* h/w completed processing */
8428 +#define        SAFE_PE_CSR_LOADSA      0x00000004      /* load SA digests */
8429 +#define        SAFE_PE_CSR_HASHFINAL   0x00000010      /* do hash pad & write result */
8430 +#define        SAFE_PE_CSR_SABUSID     0x000000c0      /* bus id for SA */
8431 +#define        SAFE_PE_CSR_SAPCI       0x00000040      /* PCI bus id for SA */
8432 +#define        SAFE_PE_CSR_NXTHDR      0x0000ff00      /* next hdr value for IPsec */
8433 +#define        SAFE_PE_CSR_FPAD        0x0000ff00      /* fixed pad for basic ops */
8434 +#define        SAFE_PE_CSR_STATUS      0x00ff0000      /* operation result status */
8435 +#define        SAFE_PE_CSR_AUTH_FAIL   0x00010000      /* ICV mismatch (inbound) */
8436 +#define        SAFE_PE_CSR_PAD_FAIL    0x00020000      /* pad verify fail (inbound) */
8437 +#define        SAFE_PE_CSR_SEQ_FAIL    0x00040000      /* sequence number (inbound) */
8438 +#define        SAFE_PE_CSR_XERROR      0x00080000      /* extended error follows */
8439 +#define        SAFE_PE_CSR_XECODE      0x00f00000      /* extended error code */
8440 +#define        SAFE_PE_CSR_XECODE_S    20
8441 +#define        SAFE_PE_CSR_XECODE_BADCMD       0       /* invalid command */
8442 +#define        SAFE_PE_CSR_XECODE_BADALG       1       /* invalid algorithm */
8443 +#define        SAFE_PE_CSR_XECODE_ALGDIS       2       /* algorithm disabled */
8444 +#define        SAFE_PE_CSR_XECODE_ZEROLEN      3       /* zero packet length */
8445 +#define        SAFE_PE_CSR_XECODE_DMAERR       4       /* bus DMA error */
8446 +#define        SAFE_PE_CSR_XECODE_PIPEABORT    5       /* secondary bus DMA error */
8447 +#define        SAFE_PE_CSR_XECODE_BADSPI       6       /* IPsec SPI mismatch */
8448 +#define        SAFE_PE_CSR_XECODE_TIMEOUT      10      /* failsafe timeout */
8449 +#define        SAFE_PE_CSR_PAD         0xff000000      /* ESP padding control/status */
8450 +#define        SAFE_PE_CSR_PAD_MIN     0x00000000      /* minimum IPsec padding */
8451 +#define        SAFE_PE_CSR_PAD_16      0x08000000      /* pad to 16-byte boundary */
8452 +#define        SAFE_PE_CSR_PAD_32      0x10000000      /* pad to 32-byte boundary */
8453 +#define        SAFE_PE_CSR_PAD_64      0x20000000      /* pad to 64-byte boundary */
8454 +#define        SAFE_PE_CSR_PAD_128     0x40000000      /* pad to 128-byte boundary */
8455 +#define        SAFE_PE_CSR_PAD_256     0x80000000      /* pad to 256-byte boundary */
8456 +
8457 +/*
8458 + * Check the CSR to see if the PE has returned ownership to
8459 + * the host.  Note that before processing a descriptor this
8460 + * must be done followed by a check of the SAFE_PE_LEN register
8461 + * status bits to avoid premature processing of a descriptor
8462 + * on its way back to the host.
8463 + */
8464 +#define        SAFE_PE_CSR_IS_DONE(_csr) \
8465 +    (((_csr) & (SAFE_PE_CSR_READY | SAFE_PE_CSR_DONE)) == SAFE_PE_CSR_DONE)
8466 +
8467 +#define        SAFE_PE_LEN_LENGTH      0x000fffff      /* total length (bytes) */
8468 +#define        SAFE_PE_LEN_READY       0x00400000      /* ready for processing */
8469 +#define        SAFE_PE_LEN_DONE        0x00800000      /* h/w completed processing */
8470 +#define        SAFE_PE_LEN_BYPASS      0xff000000      /* bypass offset (bytes) */
8471 +#define        SAFE_PE_LEN_BYPASS_S    24
8472 +
8473 +#define        SAFE_PE_LEN_IS_DONE(_len) \
8474 +    (((_len) & (SAFE_PE_LEN_READY | SAFE_PE_LEN_DONE)) == SAFE_PE_LEN_DONE)
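+
+/*
+ * A minimal sketch of the double-check described above, assuming `re'
+ * points at a struct safe_ringentry (see safevar.h) on its way back
+ * from the packet engine:
+ *
+ *     if (SAFE_PE_CSR_IS_DONE(re->re_desc.d_csr) &&
+ *         SAFE_PE_LEN_IS_DONE(re->re_desc.d_len)) {
+ *             ... both ownership bits have returned to the host,
+ *                 so the descriptor may be post-processed ...
+ *     }
+ */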
8475 +
8476 +/* NB: these apply to HU_STAT, HM_STAT, HI_CLR, and HI_MASK */
8477 +#define        SAFE_INT_PE_CDONE       0x00000002      /* PE context done */
8478 +#define        SAFE_INT_PE_DDONE       0x00000008      /* PE descriptor done */
8479 +#define        SAFE_INT_PE_ERROR       0x00000010      /* PE error */
8480 +#define        SAFE_INT_PE_ODONE       0x00000020      /* PE operation done */
8481 +
8482 +#define        SAFE_HI_CFG_PULSE       0x00000001      /* use pulse interrupt */
8483 +#define        SAFE_HI_CFG_LEVEL       0x00000000      /* use level interrupt */
8484 +#define        SAFE_HI_CFG_AUTOCLR     0x00000002      /* auto-clear pulse interrupt */
8485 +
8486 +#define        SAFE_ENDIAN_PASS        0x000000e4      /* straight pass-thru */
8487 +#define        SAFE_ENDIAN_SWAB        0x0000001b      /* swap bytes in 32-bit word */
8488 +
8489 +#define        SAFE_PE_DMACFG_PERESET  0x00000001      /* reset packet engine */
8490 +#define        SAFE_PE_DMACFG_PDRRESET 0x00000002      /* reset PDR counters/ptrs */
8491 +#define        SAFE_PE_DMACFG_SGRESET  0x00000004      /* reset scatter/gather cache */
8492 +#define        SAFE_PE_DMACFG_FSENA    0x00000008      /* enable failsafe reset */
8493 +#define        SAFE_PE_DMACFG_PEMODE   0x00000100      /* packet engine mode */
8494 +#define        SAFE_PE_DMACFG_SAPREC   0x00000200      /* SA precedes packet */
8495 +#define        SAFE_PE_DMACFG_PKFOLL   0x00000400      /* packet follows descriptor */
8496 +#define        SAFE_PE_DMACFG_GPRBID   0x00003000      /* gather particle ring busid */
8497 +#define        SAFE_PE_DMACFG_GPRPCI   0x00001000      /* PCI gather particle ring */
8498 +#define        SAFE_PE_DMACFG_SPRBID   0x0000c000      /* scatter part. ring busid */
8499 +#define        SAFE_PE_DMACFG_SPRPCI   0x00004000      /* PCI scatter part. ring */
8500 +#define        SAFE_PE_DMACFG_ESDESC   0x00010000      /* endian swap descriptors */
8501 +#define        SAFE_PE_DMACFG_ESSA     0x00020000      /* endian swap SA data */
8502 +#define        SAFE_PE_DMACFG_ESPACKET 0x00040000      /* endian swap packet data */
8503 +#define        SAFE_PE_DMACFG_ESPDESC  0x00080000      /* endian swap particle desc. */
8504 +#define        SAFE_PE_DMACFG_NOPDRUP  0x00100000      /* supp. PDR ownership update */
8505 +#define        SAFE_PD_EDMACFG_PCIMODE 0x01000000      /* PCI target mode */
8506 +
8507 +#define        SAFE_PE_DMASTAT_PEIDONE 0x00000001      /* PE core input done */
8508 +#define        SAFE_PE_DMASTAT_PEODONE 0x00000002      /* PE core output done */
8509 +#define        SAFE_PE_DMASTAT_ENCDONE 0x00000004      /* encryption done */
8510 +#define        SAFE_PE_DMASTAT_IHDONE  0x00000008      /* inner hash done */
8511 +#define        SAFE_PE_DMASTAT_OHDONE  0x00000010      /* outer hash (HMAC) done */
8512 +#define        SAFE_PE_DMASTAT_PADFLT  0x00000020      /* crypto pad fault */
8513 +#define        SAFE_PE_DMASTAT_ICVFLT  0x00000040      /* ICV fault */
8514 +#define        SAFE_PE_DMASTAT_SPIMIS  0x00000080      /* SPI mismatch */
8515 +#define        SAFE_PE_DMASTAT_CRYPTO  0x00000100      /* crypto engine timeout */
8516 +#define        SAFE_PE_DMASTAT_CQACT   0x00000200      /* command queue active */
8517 +#define        SAFE_PE_DMASTAT_IRACT   0x00000400      /* input request active */
8518 +#define        SAFE_PE_DMASTAT_ORACT   0x00000800      /* output request active */
8519 +#define        SAFE_PE_DMASTAT_PEISIZE 0x003ff000      /* PE input size:32-bit words */
8520 +#define        SAFE_PE_DMASTAT_PEOSIZE 0xffc00000      /* PE out. size:32-bit words */
8521 +
8522 +#define        SAFE_PE_RINGCFG_SIZE    0x000003ff      /* ring size (descriptors) */
8523 +#define        SAFE_PE_RINGCFG_OFFSET  0xffff0000      /* offset btw desc's (dwords) */
8524 +#define        SAFE_PE_RINGCFG_OFFSET_S        16
8525 +
8526 +#define        SAFE_PE_RINGPOLL_POLL   0x00000fff      /* polling frequency/divisor */
8527 +#define        SAFE_PE_RINGPOLL_RETRY  0x03ff0000      /* polling frequency/divisor */
8528 +#define        SAFE_PE_RINGPOLL_CONT   0x80000000      /* continuously poll */
8529 +
8530 +#define        SAFE_PE_IRNGSTAT_CQAVAIL 0x00000001     /* command queue available */
8531 +
8532 +#define        SAFE_PE_ERNGSTAT_NEXT   0x03ff0000      /* index of next packet desc. */
8533 +#define        SAFE_PE_ERNGSTAT_NEXT_S 16
8534 +
8535 +#define        SAFE_PE_IOTHRESH_INPUT  0x000003ff      /* input threshold (dwords) */
8536 +#define        SAFE_PE_IOTHRESH_OUTPUT 0x03ff0000      /* output threshold (dwords) */
8537 +
8538 +#define        SAFE_PE_PARTCFG_SIZE    0x0000ffff      /* scatter particle size */
8539 +#define        SAFE_PE_PARTCFG_GBURST  0x00030000      /* gather particle burst */
8540 +#define        SAFE_PE_PARTCFG_GBURST_2        0x00000000
8541 +#define        SAFE_PE_PARTCFG_GBURST_4        0x00010000
8542 +#define        SAFE_PE_PARTCFG_GBURST_8        0x00020000
8543 +#define        SAFE_PE_PARTCFG_GBURST_16       0x00030000
8544 +#define        SAFE_PE_PARTCFG_SBURST  0x000c0000      /* scatter particle burst */
8545 +#define        SAFE_PE_PARTCFG_SBURST_2        0x00000000
8546 +#define        SAFE_PE_PARTCFG_SBURST_4        0x00040000
8547 +#define        SAFE_PE_PARTCFG_SBURST_8        0x00080000
8548 +#define        SAFE_PE_PARTCFG_SBURST_16       0x000c0000
8549 +
8550 +#define        SAFE_PE_PARTSIZE_SCAT   0xffff0000      /* scatter particle ring size */
8551 +#define        SAFE_PE_PARTSIZE_GATH   0x0000ffff      /* gather particle ring size */
8552 +
8553 +#define        SAFE_CRYPTO_CTRL_3DES   0x00000001      /* enable 3DES support */
8554 +#define        SAFE_CRYPTO_CTRL_PKEY   0x00010000      /* enable public key support */
8555 +#define        SAFE_CRYPTO_CTRL_RNG    0x00020000      /* enable RNG support */
8556 +
8557 +#define        SAFE_DEVINFO_REV_MIN    0x0000000f      /* minor rev for chip */
8558 +#define        SAFE_DEVINFO_REV_MAJ    0x000000f0      /* major rev for chip */
8559 +#define        SAFE_DEVINFO_REV_MAJ_S  4
8560 +#define        SAFE_DEVINFO_DES        0x00000100      /* DES/3DES support present */
8561 +#define        SAFE_DEVINFO_ARC4       0x00000200      /* ARC4 support present */
8562 +#define        SAFE_DEVINFO_AES        0x00000400      /* AES support present */
8563 +#define        SAFE_DEVINFO_MD5        0x00001000      /* MD5 support present */
8564 +#define        SAFE_DEVINFO_SHA1       0x00002000      /* SHA-1 support present */
8565 +#define        SAFE_DEVINFO_RIPEMD     0x00004000      /* RIPEMD support present */
8566 +#define        SAFE_DEVINFO_DEFLATE    0x00010000      /* Deflate support present */
8567 +#define        SAFE_DEVINFO_SARAM      0x00100000      /* on-chip SA RAM present */
8568 +#define        SAFE_DEVINFO_EMIBUS     0x00200000      /* EMI bus present */
8569 +#define        SAFE_DEVINFO_PKEY       0x00400000      /* public key support present */
8570 +#define        SAFE_DEVINFO_RNG        0x00800000      /* RNG present */
8571 +
8572 +#define        SAFE_REV(_maj, _min)    (((_maj) << SAFE_DEVINFO_REV_MAJ_S) | (_min))
8573 +#define        SAFE_REV_MAJ(_chiprev) \
8574 +       (((_chiprev) & SAFE_DEVINFO_REV_MAJ) >> SAFE_DEVINFO_REV_MAJ_S)
8575 +#define        SAFE_REV_MIN(_chiprev)  ((_chiprev) & SAFE_DEVINFO_REV_MIN)
8576 +
8577 +#define        SAFE_PK_FUNC_MULT       0x00000001      /* Multiply function */
8578 +#define        SAFE_PK_FUNC_SQUARE     0x00000004      /* Square function */
8579 +#define        SAFE_PK_FUNC_ADD        0x00000010      /* Add function */
8580 +#define        SAFE_PK_FUNC_SUB        0x00000020      /* Subtract function */
8581 +#define        SAFE_PK_FUNC_LSHIFT     0x00000040      /* Left-shift function */
8582 +#define        SAFE_PK_FUNC_RSHIFT     0x00000080      /* Right-shift function */
8583 +#define        SAFE_PK_FUNC_DIV        0x00000100      /* Divide function */
8584 +#define        SAFE_PK_FUNC_CMP        0x00000400      /* Compare function */
8585 +#define        SAFE_PK_FUNC_COPY       0x00000800      /* Copy function */
8586 +#define        SAFE_PK_FUNC_EXP16      0x00002000      /* Exponentiate (4-bit ACT) */
8587 +#define        SAFE_PK_FUNC_EXP4       0x00004000      /* Exponentiate (2-bit ACT) */
8588 +#define        SAFE_PK_FUNC_RUN        0x00008000      /* start/status */
8589 +
8590 +#define        SAFE_RNG_STAT_BUSY      0x00000001      /* busy, data not valid */
8591 +
8592 +#define        SAFE_RNG_CTRL_PRE_LFSR  0x00000001      /* enable output pre-LFSR */
8593 +#define        SAFE_RNG_CTRL_TST_MODE  0x00000002      /* enable test mode */
8594 +#define        SAFE_RNG_CTRL_TST_RUN   0x00000004      /* start test state machine */
8595 +#define        SAFE_RNG_CTRL_ENA_RING1 0x00000008      /* test entropy oscillator #1 */
8596 +#define        SAFE_RNG_CTRL_ENA_RING2 0x00000010      /* test entropy oscillator #2 */
8597 +#define        SAFE_RNG_CTRL_DIS_ALARM 0x00000020      /* disable RNG alarm reports */
8598 +#define        SAFE_RNG_CTRL_TST_CLOCK 0x00000040      /* enable test clock */
8599 +#define        SAFE_RNG_CTRL_SHORTEN   0x00000080      /* shorten state timers */
8600 +#define        SAFE_RNG_CTRL_TST_ALARM 0x00000100      /* simulate alarm state */
8601 +#define        SAFE_RNG_CTRL_RST_LFSR  0x00000200      /* reset LFSR */
8602 +
8603 +/*
8604 + * Packet engine descriptor.  Note that d_csr is a copy of the
8605 + * SAFE_PE_CSR register and all definitions apply, and d_len
8606 + * is a copy of the SAFE_PE_LEN register and all definitions apply.
8607 + * d_src and d_len may point directly to contiguous data or to a
8608 + * list of ``particle descriptors'' when using scatter/gather i/o.
8609 + */
8610 +struct safe_desc {
8611 +       u_int32_t       d_csr;                  /* per-packet control/status */
8612 +       u_int32_t       d_src;                  /* source address */
8613 +       u_int32_t       d_dst;                  /* destination address */
8614 +       u_int32_t       d_sa;                   /* SA address */
8615 +       u_int32_t       d_len;                  /* length, bypass, status */
8616 +};
8617 +
8618 +/*
8619 + * Scatter/Gather particle descriptor.
8620 + *
8621 + * NB: scatter descriptors do not specify a size; this is fixed
8622 + *     by the setting of the SAFE_PE_PARTCFG register.
8623 + */
8624 +struct safe_pdesc {
8625 +       u_int32_t       pd_addr;                /* particle address */
8626 +#ifdef __BIG_ENDIAN
8627 +       u_int16_t       pd_flags;               /* control word */
8628 +       u_int16_t       pd_size;                /* particle size (bytes) */
8629 +#else
8630 +       u_int16_t       pd_flags;               /* control word */
8631 +       u_int16_t       pd_size;                /* particle size (bytes) */
8632 +#endif
8633 +};
8634 +
8635 +#define        SAFE_PD_READY   0x0001                  /* ready for processing */
8636 +#define        SAFE_PD_DONE    0x0002                  /* h/w completed processing */
8637 +
8638 +/*
8639 + * Security Association (SA) Record (Rev 1).  One of these is
8640 + * required for each operation processed by the packet engine.
8641 + */
8642 +struct safe_sarec {
8643 +       u_int32_t       sa_cmd0;
8644 +       u_int32_t       sa_cmd1;
8645 +       u_int32_t       sa_resv0;
8646 +       u_int32_t       sa_resv1;
8647 +       u_int32_t       sa_key[8];              /* DES/3DES/AES key */
8648 +       u_int32_t       sa_indigest[5];         /* inner digest */
8649 +       u_int32_t       sa_outdigest[5];        /* outer digest */
8650 +       u_int32_t       sa_spi;                 /* SPI */
8651 +       u_int32_t       sa_seqnum;              /* sequence number */
8652 +       u_int32_t       sa_seqmask[2];          /* sequence number mask */
8653 +       u_int32_t       sa_resv2;
8654 +       u_int32_t       sa_staterec;            /* address of state record */
8655 +       u_int32_t       sa_resv3[2];
8656 +       u_int32_t       sa_samgmt0;             /* SA management field 0 */
8657 +       u_int32_t       sa_samgmt1;             /* SA management field 1 */
8658 +};
8659 +
8660 +#define        SAFE_SA_CMD0_OP         0x00000007      /* operation code */
8661 +#define        SAFE_SA_CMD0_OP_CRYPT   0x00000000      /* encrypt/decrypt (basic) */
8662 +#define        SAFE_SA_CMD0_OP_BOTH    0x00000001      /* encrypt-hash/hash-decrypt */
8663 +#define        SAFE_SA_CMD0_OP_HASH    0x00000003      /* hash (outbound-only) */
8664 +#define        SAFE_SA_CMD0_OP_ESP     0x00000000      /* ESP in/out (proto) */
8665 +#define        SAFE_SA_CMD0_OP_AH      0x00000001      /* AH in/out (proto) */
8666 +#define        SAFE_SA_CMD0_INBOUND    0x00000008      /* inbound operation */
8667 +#define        SAFE_SA_CMD0_OUTBOUND   0x00000000      /* outbound operation */
8668 +#define        SAFE_SA_CMD0_GROUP      0x00000030      /* operation group */
8669 +#define        SAFE_SA_CMD0_BASIC      0x00000000      /* basic operation */
8670 +#define        SAFE_SA_CMD0_PROTO      0x00000010      /* protocol/packet operation */
8671 +#define        SAFE_SA_CMD0_BUNDLE     0x00000020      /* bundled operation (resvd) */
8672 +#define        SAFE_SA_CMD0_PAD        0x000000c0      /* crypto pad method */
8673 +#define        SAFE_SA_CMD0_PAD_IPSEC  0x00000000      /* IPsec padding */
8674 +#define        SAFE_SA_CMD0_PAD_PKCS7  0x00000040      /* PKCS#7 padding */
8675 +#define        SAFE_SA_CMD0_PAD_CONS   0x00000080      /* constant padding */
8676 +#define        SAFE_SA_CMD0_PAD_ZERO   0x000000c0      /* zero padding */
8677 +#define        SAFE_SA_CMD0_CRYPT_ALG  0x00000f00      /* symmetric crypto algorithm */
8678 +#define        SAFE_SA_CMD0_DES        0x00000000      /* DES crypto algorithm */
8679 +#define        SAFE_SA_CMD0_3DES       0x00000100      /* 3DES crypto algorithm */
8680 +#define        SAFE_SA_CMD0_AES        0x00000300      /* AES crypto algorithm */
8681 +#define        SAFE_SA_CMD0_CRYPT_NULL 0x00000f00      /* null crypto algorithm */
8682 +#define        SAFE_SA_CMD0_HASH_ALG   0x0000f000      /* hash algorithm */
8683 +#define        SAFE_SA_CMD0_MD5        0x00000000      /* MD5 hash algorithm */
8684 +#define        SAFE_SA_CMD0_SHA1       0x00001000      /* SHA-1 hash algorithm */
8685 +#define        SAFE_SA_CMD0_HASH_NULL  0x0000f000      /* null hash algorithm */
8686 +#define        SAFE_SA_CMD0_HDR_PROC   0x00080000      /* header processing */
8687 +#define        SAFE_SA_CMD0_IBUSID     0x00300000      /* input bus id */
8688 +#define        SAFE_SA_CMD0_IPCI       0x00100000      /* PCI input bus id */
8689 +#define        SAFE_SA_CMD0_OBUSID     0x00c00000      /* output bus id */
8690 +#define        SAFE_SA_CMD0_OPCI       0x00400000      /* PCI output bus id */
8691 +#define        SAFE_SA_CMD0_IVLD       0x03000000      /* IV loading */
8692 +#define        SAFE_SA_CMD0_IVLD_NONE  0x00000000      /* IV no load (reuse) */
8693 +#define        SAFE_SA_CMD0_IVLD_IBUF  0x01000000      /* IV load from input buffer */
8694 +#define        SAFE_SA_CMD0_IVLD_STATE 0x02000000      /* IV load from state */
8695 +#define        SAFE_SA_CMD0_HSLD       0x0c000000      /* hash state loading */
8696 +#define        SAFE_SA_CMD0_HSLD_SA    0x00000000      /* hash state load from SA */
8697 +#define        SAFE_SA_CMD0_HSLD_STATE 0x08000000      /* hash state load from state */
8698 +#define        SAFE_SA_CMD0_HSLD_NONE  0x0c000000      /* hash state no load */
8699 +#define        SAFE_SA_CMD0_SAVEIV     0x10000000      /* save IV */
8700 +#define        SAFE_SA_CMD0_SAVEHASH   0x20000000      /* save hash state */
8701 +#define        SAFE_SA_CMD0_IGATHER    0x40000000      /* input gather */
8702 +#define        SAFE_SA_CMD0_OSCATTER   0x80000000      /* output scatter */
8703 +
8704 +#define        SAFE_SA_CMD1_HDRCOPY    0x00000002      /* copy header to output */
8705 +#define        SAFE_SA_CMD1_PAYCOPY    0x00000004      /* copy payload to output */
8706 +#define        SAFE_SA_CMD1_PADCOPY    0x00000008      /* copy pad to output */
8707 +#define        SAFE_SA_CMD1_IPV4       0x00000000      /* IPv4 protocol */
8708 +#define        SAFE_SA_CMD1_IPV6       0x00000010      /* IPv6 protocol */
8709 +#define        SAFE_SA_CMD1_MUTABLE    0x00000020      /* mutable bit processing */
8710 +#define        SAFE_SA_CMD1_SRBUSID    0x000000c0      /* state record bus id */
8711 +#define        SAFE_SA_CMD1_SRPCI      0x00000040      /* state record from PCI */
8712 +#define        SAFE_SA_CMD1_CRMODE     0x00000300      /* crypto mode */
8713 +#define        SAFE_SA_CMD1_ECB        0x00000000      /* ECB crypto mode */
8714 +#define        SAFE_SA_CMD1_CBC        0x00000100      /* CBC crypto mode */
8715 +#define        SAFE_SA_CMD1_OFB        0x00000200      /* OFB crypto mode */
8716 +#define        SAFE_SA_CMD1_CFB        0x00000300      /* CFB crypto mode */
8717 +#define        SAFE_SA_CMD1_CRFEEDBACK 0x00000c00      /* crypto feedback mode */
8718 +#define        SAFE_SA_CMD1_64BIT      0x00000000      /* 64-bit crypto feedback */
8719 +#define        SAFE_SA_CMD1_8BIT       0x00000400      /* 8-bit crypto feedback */
8720 +#define        SAFE_SA_CMD1_1BIT       0x00000800      /* 1-bit crypto feedback */
8721 +#define        SAFE_SA_CMD1_128BIT     0x00000c00      /* 128-bit crypto feedback */
8722 +#define        SAFE_SA_CMD1_OPTIONS    0x00001000      /* HMAC/options mutable bit */
8723 +#define        SAFE_SA_CMD1_HMAC       SAFE_SA_CMD1_OPTIONS
8724 +#define        SAFE_SA_CMD1_SAREV1     0x00008000      /* SA Revision 1 */
8725 +#define        SAFE_SA_CMD1_OFFSET     0x00ff0000      /* hash/crypto offset(dwords) */
8726 +#define        SAFE_SA_CMD1_OFFSET_S   16
8727 +#define        SAFE_SA_CMD1_AESKEYLEN  0x0f000000      /* AES key length */
8728 +#define        SAFE_SA_CMD1_AES128     0x02000000      /* 128-bit AES key */
8729 +#define        SAFE_SA_CMD1_AES192     0x03000000      /* 192-bit AES key */
8730 +#define        SAFE_SA_CMD1_AES256     0x04000000      /* 256-bit AES key */
8731 +
8732 +/* 
8733 + * Security Association State Record (Rev 1).
8734 + */
8735 +struct safe_sastate {
8736 +       u_int32_t       sa_saved_iv[4];         /* saved IV (DES/3DES/AES) */
8737 +       u_int32_t       sa_saved_hashbc;        /* saved hash byte count */
8738 +       u_int32_t       sa_saved_indigest[5];   /* saved inner digest */
8739 +};
8740 +#endif /* _SAFE_SAFEREG_H_ */
8741 --- /dev/null   2007-07-04 13:54:27.000000000 +1000
8742 +++ linux/crypto/ocf/safe/safevar.h     2007-07-03 09:46:58.000000000 +1000
8743 @@ -0,0 +1,230 @@
8744 +/*-
8745 + * The Linux port of this code was done by David McCullough
8746 + * Copyright (C) 2004-2007 David McCullough <david_mccullough@securecomputing.com>
8747 + * The license and original author are listed below.
8748 + *
8749 + * Copyright (c) 2003 Sam Leffler, Errno Consulting
8750 + * Copyright (c) 2003 Global Technology Associates, Inc.
8751 + * All rights reserved.
8752 + *
8753 + * Redistribution and use in source and binary forms, with or without
8754 + * modification, are permitted provided that the following conditions
8755 + * are met:
8756 + * 1. Redistributions of source code must retain the above copyright
8757 + *    notice, this list of conditions and the following disclaimer.
8758 + * 2. Redistributions in binary form must reproduce the above copyright
8759 + *    notice, this list of conditions and the following disclaimer in the
8760 + *    documentation and/or other materials provided with the distribution.
8761 + *
8762 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
8763 + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
8764 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
8765 + * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
8766 + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
8767 + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
8768 + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
8769 + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
8770 + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
8771 + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
8772 + * SUCH DAMAGE.
8773 + *
8774 + * $FreeBSD: src/sys/dev/safe/safevar.h,v 1.2 2006/05/17 18:34:26 pjd Exp $
8775 + */
8776 +#ifndef _SAFE_SAFEVAR_H_
8777 +#define        _SAFE_SAFEVAR_H_
8778 +
8779 +/* Maximum queue length */
8780 +#ifndef SAFE_MAX_NQUEUE
8781 +#define SAFE_MAX_NQUEUE        60
8782 +#endif
8783 +
8784 +#define        SAFE_MAX_PART           64      /* Maximum scatter/gather depth */
8785 +#define        SAFE_DMA_BOUNDARY       0       /* No boundary for source DMA ops */
8786 +#define        SAFE_MAX_DSIZE          2048    /* fixed scatter particle size (MCLBYTES on FreeBSD) */
8787 +#define        SAFE_MAX_SSIZE          0x0ffff /* Maximum gather particle size */
8788 +#define        SAFE_MAX_DMA            0xfffff /* Maximum PE operand size (20 bits) */
8789 +/* total src+dst particle descriptors */
8790 +#define        SAFE_TOTAL_DPART        (SAFE_MAX_NQUEUE * SAFE_MAX_PART)
8791 +#define        SAFE_TOTAL_SPART        (SAFE_MAX_NQUEUE * SAFE_MAX_PART)
8792 +
8793 +#define        SAFE_RNG_MAXBUFSIZ      128     /* 32-bit words */
8794 +
8795 +#define        SAFE_CARD(sid)          (((sid) & 0xf0000000) >> 28)
8796 +#define        SAFE_SESSION(sid)       ( (sid) & 0x0fffffff)
8797 +#define        SAFE_SID(crd, sesn)     (((crd) << 28) | ((sesn) & 0x0fffffff))
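+/*
+ * For example, SAFE_SID(2, 5) packs to 0x20000005; SAFE_CARD() and
+ * SAFE_SESSION() then recover 2 and 5 respectively.
+ */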
8798 +
8799 +#define SAFE_DEF_RTY           0xff    /* PCI Retry Timeout */
8800 +#define SAFE_DEF_TOUT          0xff    /* PCI TRDY Timeout */
8801 +#define SAFE_DEF_CACHELINE     0x01    /* Cache Line setting */
8802 +
8803 +#ifdef __KERNEL__
8804 +/*
8805 + * State associated with the allocation of each chunk
8806 + * of memory setup for DMA.
8807 + */
8808 +struct safe_dma_alloc {
8809 +       dma_addr_t              dma_paddr;
8810 +       void                    *dma_vaddr;
8811 +};
8812 +
8813 +/*
8814 + * Cryptographic operand state.  One of these exists for each
8815 + * source and destination operand passed in from the crypto
8816 + * subsystem.  When possible source and destination operands
8817 + * refer to the same memory.  More often they are distinct.
8818 + * We track the virtual address of each operand as well as
8819 + * where each is mapped for DMA.
8820 + */
8821 +struct safe_operand {
8822 +       union {
8823 +               struct sk_buff *skb;
8824 +               struct uio *io;
8825 +       } u;
8826 +       void                    *map;
8827 +       int                             mapsize;        /* total number of bytes in segs */
8828 +       struct {
8829 +               dma_addr_t      ds_addr;
8830 +               int                     ds_len;
8831 +               int                     ds_tlen;
8832 +       } segs[SAFE_MAX_PART];
8833 +       int                             nsegs;
8834 +};
8835 +
8836 +/*
8837 + * Packet engine ring entry and cryptographic operation state.
8838 + * The packet engine requires a ring of descriptors that contain
8839 + * pointers to various cryptographic state.  However the ring
8840 + * configuration register allows you to specify an arbitrary size
8841 + * for ring entries.  We use this feature to collect most of the
8842 + * state for each cryptographic request into one spot.  Other than
8843 + * ring entries only the ``particle descriptors'' (scatter/gather
8844 + * lists) and the actual operand data are kept separate.  The
8845 + * particle descriptors must also be organized in rings.  The
8846 + * operand data can be located arbitrarily (modulo alignment constraints).
8847 + *
8848 + * Note that the descriptor ring is mapped onto the PCI bus so
8849 + * the hardware can DMA data.  This means the entire ring must be
8850 + * contiguous.
8851 + */
8852 +struct safe_ringentry {
8853 +       struct safe_desc        re_desc;        /* command descriptor */
8854 +       struct safe_sarec       re_sa;          /* SA record */
8855 +       struct safe_sastate     re_sastate;     /* SA state record */
8856 +
8857 +       struct cryptop          *re_crp;        /* crypto operation */
8858 +
8859 +       struct safe_operand     re_src;         /* source operand */
8860 +       struct safe_operand     re_dst;         /* destination operand */
8861 +
8862 +       int                     re_sesn;        /* crypto session ID */
8863 +       int                     re_flags;
8864 +#define        SAFE_QFLAGS_COPYOUTIV   0x1             /* copy back on completion */
8865 +#define        SAFE_QFLAGS_COPYOUTICV  0x2             /* copy back on completion */
8866 +};
8867 +
8868 +#define        re_src_skb      re_src.u.skb
8869 +#define        re_src_io       re_src.u.io
8870 +#define        re_src_map      re_src.map
8871 +#define        re_src_nsegs    re_src.nsegs
8872 +#define        re_src_segs     re_src.segs
8873 +#define        re_src_mapsize  re_src.mapsize
8874 +
8875 +#define        re_dst_skb      re_dst.u.skb
8876 +#define        re_dst_io       re_dst.u.io
8877 +#define        re_dst_map      re_dst.map
8878 +#define        re_dst_nsegs    re_dst.nsegs
8879 +#define        re_dst_segs     re_dst.segs
8880 +#define        re_dst_mapsize  re_dst.mapsize
8881 +
8882 +struct rndstate_test;
8883 +
8884 +struct safe_session {
8885 +       u_int32_t       ses_used;
8886 +       u_int32_t       ses_klen;               /* key length in bits */
8887 +       u_int32_t       ses_key[8];             /* DES/3DES/AES key */
8888 +       u_int32_t       ses_mlen;               /* hmac length in bytes */
8889 +       u_int32_t       ses_hminner[5];         /* hmac inner state */
8890 +       u_int32_t       ses_hmouter[5];         /* hmac outer state */
8891 +       u_int32_t       ses_iv[4];              /* DES/3DES/AES iv */
8892 +};
8893 +
8894 +struct safe_pkq {
8895 +       struct list_head        pkq_list;
8896 +       struct cryptkop         *pkq_krp;
8897 +};
8898 +
8899 +struct safe_softc {
8900 +       softc_device_decl       sc_dev;
8901 +       u32                     sc_irq;
8902 +
8903 +       struct pci_dev          *sc_pcidev;
8904 +       ocf_iomem_t             sc_base_addr;
8905 +
8906 +       u_int                   sc_chiprev;     /* major/minor chip revision */
8907 +       int                     sc_flags;       /* device specific flags */
8908 +#define        SAFE_FLAGS_KEY          0x01            /* has key accelerator */
8909 +#define        SAFE_FLAGS_RNG          0x02            /* hardware rng */
8910 +       int                     sc_suspended;
8911 +       int                     sc_needwakeup;  /* notify crypto layer */
8912 +       int32_t                 sc_cid;         /* crypto tag */
8913 +
8914 +       struct safe_dma_alloc   sc_ringalloc;   /* PE ring allocation state */
8915 +       struct safe_ringentry   *sc_ring;       /* PE ring */
8916 +       struct safe_ringentry   *sc_ringtop;    /* PE ring top */
8917 +       struct safe_ringentry   *sc_front;      /* next free entry */
8918 +       struct safe_ringentry   *sc_back;       /* next pending entry */
8919 +       int                     sc_nqchip;      /* # passed to chip */
8920 +       spinlock_t              sc_ringmtx;     /* PE ring lock */
8921 +       struct safe_pdesc       *sc_spring;     /* src particle ring */
8922 +       struct safe_pdesc       *sc_springtop;  /* src particle ring top */
8923 +       struct safe_pdesc       *sc_spfree;     /* next free src particle */
8924 +       struct safe_dma_alloc   sc_spalloc;     /* src particle ring state */
8925 +       struct safe_pdesc       *sc_dpring;     /* dest particle ring */
8926 +       struct safe_pdesc       *sc_dpringtop;  /* dest particle ring top */
8927 +       struct safe_pdesc       *sc_dpfree;     /* next free dest particle */
8928 +       struct safe_dma_alloc   sc_dpalloc;     /* dst particle ring state */
8929 +       int                     sc_nsessions;   /* # of sessions */
8930 +       struct safe_session     *sc_sessions;   /* sessions */
8931 +
8932 +       struct timer_list       sc_pkto;        /* PK polling */
8933 +       spinlock_t              sc_pkmtx;       /* PK lock */
8934 +       struct list_head        sc_pkq;         /* queue of PK requests */
8935 +       struct safe_pkq         *sc_pkq_cur;    /* current processing request */
8936 +       u_int32_t               sc_pk_reslen, sc_pk_resoff;
8937 +
8938 +       int                     sc_max_dsize;   /* maximum safe DMA size */
8939 +};
8940 +#endif /* __KERNEL__ */
8941 +
8942 +struct safe_stats {
8943 +       u_int64_t st_ibytes;
8944 +       u_int64_t st_obytes;
8945 +       u_int32_t st_ipackets;
8946 +       u_int32_t st_opackets;
8947 +       u_int32_t st_invalid;           /* invalid argument */
8948 +       u_int32_t st_badsession;        /* invalid session id */
8949 +       u_int32_t st_badflags;          /* flags indicate !(mbuf | uio) */
8950 +       u_int32_t st_nodesc;            /* op submitted w/o descriptors */
8951 +       u_int32_t st_badalg;            /* unsupported algorithm */
8952 +       u_int32_t st_ringfull;          /* PE descriptor ring full */
8953 +       u_int32_t st_peoperr;           /* PE marked error */
8954 +       u_int32_t st_dmaerr;            /* PE DMA error */
8955 +       u_int32_t st_bypasstoobig;      /* bypass > 96 bytes */
8956 +       u_int32_t st_skipmismatch;      /* enc part begins before auth part */
8957 +       u_int32_t st_lenmismatch;       /* enc length differs from auth length */
8958 +       u_int32_t st_coffmisaligned;    /* crypto offset not 32-bit aligned */
8959 +       u_int32_t st_cofftoobig;        /* crypto offset > 255 words */
8960 +       u_int32_t st_iovmisaligned;     /* iov op not aligned */
8961 +       u_int32_t st_iovnotuniform;     /* iov op not suitable */
8962 +       u_int32_t st_unaligned;         /* unaligned src caused copy */
8963 +       u_int32_t st_notuniform;        /* non-uniform src caused copy */
8964 +       u_int32_t st_nomap;             /* bus_dmamap_create failed */
8965 +       u_int32_t st_noload;            /* bus_dmamap_load_* failed */
8966 +       u_int32_t st_nombuf;            /* MGET* failed */
8967 +       u_int32_t st_nomcl;             /* MCLGET* failed */
8968 +       u_int32_t st_maxqchip;          /* max mcr1 ops out for processing */
8969 +       u_int32_t st_rng;               /* RNG requests */
8970 +       u_int32_t st_rngalarm;          /* RNG alarm requests */
8971 +       u_int32_t st_noicvcopy;         /* ICV data copies suppressed */
8972 +};
8973 +#endif /* _SAFE_SAFEVAR_H_ */
8974 --- /dev/null   2007-07-04 13:54:27.000000000 +1000
8975 +++ linux/crypto/ocf/crypto.c   2008-07-03 10:58:33.000000000 +1000
8976 @@ -0,0 +1,1741 @@
8977 +/*-
8978 + * Linux port done by David McCullough <david_mccullough@securecomputing.com>
8979 + * Copyright (C) 2006-2007 David McCullough
8980 + * Copyright (C) 2004-2005 Intel Corporation.
8981 + * The license and original author are listed below.
8982 + *
8983 + * Copyright (c) 2002-2006 Sam Leffler.  All rights reserved.
8984 + *
8985 + * Redistribution and use in source and binary forms, with or without
8986 + * modification, are permitted provided that the following conditions
8987 + * are met:
8988 + * 1. Redistributions of source code must retain the above copyright
8989 + *    notice, this list of conditions and the following disclaimer.
8990 + * 2. Redistributions in binary form must reproduce the above copyright
8991 + *    notice, this list of conditions and the following disclaimer in the
8992 + *    documentation and/or other materials provided with the distribution.
8993 + *
8994 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
8995 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
8996 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
8997 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
8998 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
8999 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
9000 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
9001 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
9002 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
9003 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
9004 + */
9005 +
9006 +#if 0
9007 +#include <sys/cdefs.h>
9008 +__FBSDID("$FreeBSD: src/sys/opencrypto/crypto.c,v 1.27 2007/03/21 03:42:51 sam Exp $");
9009 +#endif
9010 +
9011 +/*
9012 + * Cryptographic Subsystem.
9013 + *
9014 + * This code is derived from the OpenBSD Cryptographic Framework (OCF)
9015 + * that has the copyright shown below.  Very little of the original
9016 + * code remains.
9017 + */
9018 +/*-
9019 + * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
9020 + *
9021 + * This code was written by Angelos D. Keromytis in Athens, Greece, in
9022 + * February 2000. Network Security Technologies Inc. (NSTI) kindly
9023 + * supported the development of this code.
9024 + *
9025 + * Copyright (c) 2000, 2001 Angelos D. Keromytis
9026 + *
9027 + * Permission to use, copy, and modify this software with or without fee
9028 + * is hereby granted, provided that this entire notice is included in
9029 + * all source code copies of any software which is or includes a copy or
9030 + * modification of this software.
9031 + *
9032 + * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
9033 + * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
9034 + * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
9035 + * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
9036 + * PURPOSE.
9037 + *
9038 +__FBSDID("$FreeBSD: src/sys/opencrypto/crypto.c,v 1.16 2005/01/07 02:29:16 imp Exp $");
9039 + */
9040 +
9041 +
9042 +#ifndef AUTOCONF_INCLUDED
9043 +#include <linux/config.h>
9044 +#endif
9045 +#include <linux/module.h>
9046 +#include <linux/init.h>
9047 +#include <linux/list.h>
9048 +#include <linux/slab.h>
9049 +#include <linux/wait.h>
9050 +#include <linux/sched.h>
9051 +#include <linux/spinlock.h>
9052 +#include <linux/version.h>
9053 +#include <cryptodev.h>
9054 +
9055 +/*
9056 + * keep track of whether or not we have been initialised; this is a big
9057 + * issue if we are linked into the kernel and a driver gets started before
9058 + * us
9059 + */
9060 +static int crypto_initted = 0;
9061 +
9062 +/*
9063 + * Crypto drivers register themselves by allocating a slot in the
9064 + * crypto_drivers table with crypto_get_driverid() and then registering
9065 + * each algorithm they support with crypto_register() and crypto_kregister().
9066 + */
9067 +
9068 +/*
9069 + * lock on driver table
9070 + * we track its state as spin_is_locked does not do anything on non-SMP boxes
9071 + */
9072 +static spinlock_t      crypto_drivers_lock;
9073 +static int                     crypto_drivers_locked;          /* for non-SMP boxes */
9074 +
9075 +#define        CRYPTO_DRIVER_LOCK() \
9076 +                       ({ \
9077 +                               spin_lock_irqsave(&crypto_drivers_lock, d_flags); \
9078 +                               crypto_drivers_locked = 1; \
9079 +                               dprintk("%s,%d: DRIVER_LOCK()\n", __FILE__, __LINE__); \
9080 +                        })
9081 +#define        CRYPTO_DRIVER_UNLOCK() \
9082 +                       ({ \
9083 +                               dprintk("%s,%d: DRIVER_UNLOCK()\n", __FILE__, __LINE__); \
9084 +                               crypto_drivers_locked = 0; \
9085 +                               spin_unlock_irqrestore(&crypto_drivers_lock, d_flags); \
9086 +                        })
9087 +#define        CRYPTO_DRIVER_ASSERT() \
9088 +                       ({ \
9089 +                               if (!crypto_drivers_locked) { \
9090 +                                       dprintk("%s,%d: DRIVER_ASSERT!\n", __FILE__, __LINE__); \
9091 +                               } \
9092 +                        })
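These lock/unlock macros capture an "unsigned long d_flags" variable from the enclosing
function's scope (the Q and RETQ macro pairs below expect "q_flags" and "r_flags" in the
same way), so every caller must declare the matching flags variable locally before taking
the lock.  A minimal sketch of the convention, using only names defined in this file;
example_locked_walk is a hypothetical illustration, not part of the patch:

static void example_locked_walk(void)
{
	unsigned long d_flags;	/* consumed by CRYPTO_DRIVER_LOCK()/UNLOCK() */
	int hid;

	CRYPTO_DRIVER_LOCK();
	for (hid = 0; hid < crypto_drivers_num; hid++) {
		/* crypto_drivers[] may only be inspected under the driver lock */
		if (crypto_drivers[hid].cc_dev != NULL)
			dprintk("driver %d is registered\n", hid);
	}
	CRYPTO_DRIVER_UNLOCK();
}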
9093 +
9094 +/*
9095 + * Crypto device/driver capabilities structure.
9096 + *
9097 + * Synchronization:
9098 + * (d) - protected by CRYPTO_DRIVER_LOCK()
9099 + * (q) - protected by CRYPTO_Q_LOCK()
9100 + * Fields not tagged are read-only.
9101 + */
9102 +struct cryptocap {
9103 +       device_t        cc_dev;                 /* (d) device/driver */
9104 +       u_int32_t       cc_sessions;            /* (d) # of sessions */
9105 +       u_int32_t       cc_koperations;         /* (d) # of asym operations */
9106 +       /*
9107 +        * Largest possible operator length (in bits) for each type of
9108 +        * encryption algorithm. XXX not used
9109 +        */
9110 +       u_int16_t       cc_max_op_len[CRYPTO_ALGORITHM_MAX + 1];
9111 +       u_int8_t        cc_alg[CRYPTO_ALGORITHM_MAX + 1];
9112 +       u_int8_t        cc_kalg[CRK_ALGORITHM_MAX + 1];
9113 +
9114 +       int             cc_flags;               /* (d) flags */
9115 +#define CRYPTOCAP_F_CLEANUP    0x80000000      /* needs resource cleanup */
9116 +       int             cc_qblocked;            /* (q) symmetric q blocked */
9117 +       int             cc_kqblocked;           /* (q) asymmetric q blocked */
9118 +};
9119 +static struct cryptocap *crypto_drivers = NULL;
9120 +static int crypto_drivers_num = 0;
9121 +
9122 +/*
9123 + * There are two queues for crypto requests; one for symmetric (e.g.
9124 + * cipher) operations and one for asymmetric (e.g. MOD) operations.
9125 + * A single mutex is used to lock access to both queues.  We could
9126 + * have one per-queue but having one simplifies handling of block/unblock
9127 + * operations.
9128 + */
9129 +static int crp_sleep = 0;
9130 +static LIST_HEAD(crp_q);               /* request queues */
9131 +static LIST_HEAD(crp_kq);
9132 +
9133 +static spinlock_t crypto_q_lock;
9134 +
9135 +int crypto_all_qblocked = 0;  /* protect with Q_LOCK */
9136 +module_param(crypto_all_qblocked, int, 0444);
9137 +MODULE_PARM_DESC(crypto_all_qblocked, "Are all crypto queues blocked");
9138 +
9139 +int crypto_all_kqblocked = 0; /* protect with Q_LOCK */
9140 +module_param(crypto_all_kqblocked, int, 0444);
9141 +MODULE_PARM_DESC(crypto_all_kqblocked, "Are all asym crypto queues blocked");
9142 +
9143 +#define        CRYPTO_Q_LOCK() \
9144 +                       ({ \
9145 +                               spin_lock_irqsave(&crypto_q_lock, q_flags); \
9146 +                               dprintk("%s,%d: Q_LOCK()\n", __FILE__, __LINE__); \
9147 +                        })
9148 +#define        CRYPTO_Q_UNLOCK() \
9149 +                       ({ \
9150 +                               dprintk("%s,%d: Q_UNLOCK()\n", __FILE__, __LINE__); \
9151 +                               spin_unlock_irqrestore(&crypto_q_lock, q_flags); \
9152 +                        })
9153 +
9154 +/*
9155 + * There are two queues for processing completed crypto requests; one
9156 + * for the symmetric and one for the asymmetric ops.  We only need one
9157 + * but have two to avoid type futzing (cryptop vs. cryptkop).  A single
9158 + * mutex is used to lock access to both queues.  Note that this lock
9159 + * must be separate from the lock on request queues to ensure driver
9160 + * callbacks don't generate lock order reversals.
9161 + */
9162 +static LIST_HEAD(crp_ret_q);           /* callback queues */
9163 +static LIST_HEAD(crp_ret_kq);
9164 +
9165 +static spinlock_t crypto_ret_q_lock;
9166 +#define        CRYPTO_RETQ_LOCK() \
9167 +                       ({ \
9168 +                               spin_lock_irqsave(&crypto_ret_q_lock, r_flags); \
9169 +                               dprintk("%s,%d: RETQ_LOCK\n", __FILE__, __LINE__); \
9170 +                        })
9171 +#define        CRYPTO_RETQ_UNLOCK() \
9172 +                       ({ \
9173 +                               dprintk("%s,%d: RETQ_UNLOCK\n", __FILE__, __LINE__); \
9174 +                               spin_unlock_irqrestore(&crypto_ret_q_lock, r_flags); \
9175 +                        })
9176 +#define        CRYPTO_RETQ_EMPTY()     (list_empty(&crp_ret_q) && list_empty(&crp_ret_kq))
9177 +
9178 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
9179 +static kmem_cache_t *cryptop_zone;
9180 +static kmem_cache_t *cryptodesc_zone;
9181 +#else
9182 +static struct kmem_cache *cryptop_zone;
9183 +static struct kmem_cache *cryptodesc_zone;
9184 +#endif
9185 +
9186 +#define debug crypto_debug
9187 +int crypto_debug = 0;
9188 +module_param(crypto_debug, int, 0644);
9189 +MODULE_PARM_DESC(crypto_debug, "Enable debug");
9190 +EXPORT_SYMBOL(crypto_debug);
9191 +
9192 +/*
9193 + * Maximum number of outstanding crypto requests before we start
9194 + * failing requests.  We need this to prevent DoS when more requests
9195 + * arrive than we can keep up with; otherwise we will run the system
9196 + * out of memory.  Since crypto is slow, we are usually the bottleneck
9197 + * that needs to say enough is enough.
9198 + *
9199 + * We cannot print errors when this condition occurs; we are already too
9200 + * slow, and printing anything would only make things worse.
9201 + */
9202 +
9203 +static int crypto_q_cnt = 0;
9204 +module_param(crypto_q_cnt, int, 0444);
9205 +MODULE_PARM_DESC(crypto_q_cnt,
9206 +               "Current number of outstanding crypto requests");
9207 +
9208 +static int crypto_q_max = 1000;
9209 +module_param(crypto_q_max, int, 0644);
9210 +MODULE_PARM_DESC(crypto_q_max,
9211 +               "Maximum number of outstanding crypto requests");
9212 +
9213 +#define bootverbose crypto_verbose
9214 +static int crypto_verbose = 0;
9215 +module_param(crypto_verbose, int, 0644);
9216 +MODULE_PARM_DESC(crypto_verbose,
9217 +               "Enable verbose crypto startup");
9218 +
9219 +int    crypto_usercrypto = 1;  /* userland may do crypto reqs */
9220 +module_param(crypto_usercrypto, int, 0644);
9221 +MODULE_PARM_DESC(crypto_usercrypto,
9222 +          "Enable/disable user-mode access to crypto support");
9223 +
9224 +int    crypto_userasymcrypto = 1;      /* userland may do asym crypto reqs */
9225 +module_param(crypto_userasymcrypto, int, 0644);
9226 +MODULE_PARM_DESC(crypto_userasymcrypto,
9227 +          "Enable/disable user-mode access to asymmetric crypto support");
9228 +
9229 +int    crypto_devallowsoft = 0;        /* only use hardware crypto */
9230 +module_param(crypto_devallowsoft, int, 0644);
9231 +MODULE_PARM_DESC(crypto_devallowsoft,
9232 +          "Enable/disable use of software crypto support");
9233 +
9234 +static pid_t   cryptoproc = (pid_t) -1;
9235 +static struct  completion cryptoproc_exited;
9236 +static DECLARE_WAIT_QUEUE_HEAD(cryptoproc_wait);
9237 +static pid_t   cryptoretproc = (pid_t) -1;
9238 +static struct  completion cryptoretproc_exited;
9239 +static DECLARE_WAIT_QUEUE_HEAD(cryptoretproc_wait);
9240 +
9241 +static int crypto_proc(void *arg);
9242 +static int crypto_ret_proc(void *arg);
9243 +static int crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint);
9244 +static int crypto_kinvoke(struct cryptkop *krp, int flags);
9245 +static void crypto_exit(void);
9246 +static  int crypto_init(void);
9247 +
9248 +static struct cryptostats cryptostats;
9249 +
9250 +static struct cryptocap *
9251 +crypto_checkdriver(u_int32_t hid)
9252 +{
9253 +       if (crypto_drivers == NULL)
9254 +               return NULL;
9255 +       return (hid >= crypto_drivers_num ? NULL : &crypto_drivers[hid]);
9256 +}
9257 +
9258 +/*
9259 + * Compare a driver's list of supported algorithms against another
9260 + * list; return non-zero if all algorithms are supported.
9261 + */
9262 +static int
9263 +driver_suitable(const struct cryptocap *cap, const struct cryptoini *cri)
9264 +{
9265 +       const struct cryptoini *cr;
9266 +
9267 +       /* See if all the algorithms are supported. */
9268 +       for (cr = cri; cr; cr = cr->cri_next)
9269 +               if (cap->cc_alg[cr->cri_alg] == 0)
9270 +                       return 0;
9271 +       return 1;
9272 +}
9273 +
9274 +/*
9275 + * Select a driver for a new session that supports the specified
9276 + * algorithms and, optionally, is constrained according to the flags.
9277 + * The algorithm we use here is pretty stupid; just use the
9278 + * first driver that supports all the algorithms we need. If there
9279 + * are multiple drivers we choose the driver with the fewest active
9280 + * sessions.  We prefer hardware-backed drivers to software ones.
9281 + *
9282 + * XXX We need more smarts here (in real life too, but that's
9283 + * XXX another story altogether).
9284 + */
9285 +static struct cryptocap *
9286 +crypto_select_driver(const struct cryptoini *cri, int flags)
9287 +{
9288 +       struct cryptocap *cap, *best;
9289 +       int match, hid;
9290 +
9291 +       CRYPTO_DRIVER_ASSERT();
9292 +
9293 +       /*
9294 +        * Look first for hardware crypto devices if permitted.
9295 +        */
9296 +       if (flags & CRYPTOCAP_F_HARDWARE)
9297 +               match = CRYPTOCAP_F_HARDWARE;
9298 +       else
9299 +               match = CRYPTOCAP_F_SOFTWARE;
9300 +       best = NULL;
9301 +again:
9302 +       for (hid = 0; hid < crypto_drivers_num; hid++) {
9303 +               cap = &crypto_drivers[hid];
9304 +               /*
9305 +                * If it's not initialized, is in the process of
9306 +                * going away, or is not appropriate (hardware
9307 +                * or software based on match), then skip.
9308 +                */
9309 +               if (cap->cc_dev == NULL ||
9310 +                   (cap->cc_flags & CRYPTOCAP_F_CLEANUP) ||
9311 +                   (cap->cc_flags & match) == 0)
9312 +                       continue;
9313 +
9314 +               /* verify all the algorithms are supported. */
9315 +               if (driver_suitable(cap, cri)) {
9316 +                       if (best == NULL ||
9317 +                           cap->cc_sessions < best->cc_sessions)
9318 +                               best = cap;
9319 +               }
9320 +       }
9321 +       if (best != NULL)
9322 +               return best;
9323 +       if (match == CRYPTOCAP_F_HARDWARE && (flags & CRYPTOCAP_F_SOFTWARE)) {
9324 +               /* sort of an Algol 68-style for loop */
9325 +               match = CRYPTOCAP_F_SOFTWARE;
9326 +               goto again;
9327 +       }
9328 +       return best;
9329 +}
9330 +
9331 +/*
9332 + * Create a new session.  The crid argument specifies a crypto
9333 + * driver to use or constraints on a driver to select (hardware
9334 + * only, software only, either).  Whatever driver is selected
9335 + * must be capable of the requested crypto algorithms.
9336 + */
9337 +int
9338 +crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int crid)
9339 +{
9340 +       struct cryptocap *cap;
9341 +       u_int32_t hid, lid;
9342 +       int err;
9343 +       unsigned long d_flags;
9344 +
9345 +       CRYPTO_DRIVER_LOCK();
9346 +       if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
9347 +               /*
9348 +                * Use specified driver; verify it is capable.
9349 +                */
9350 +               cap = crypto_checkdriver(crid);
9351 +               if (cap != NULL && !driver_suitable(cap, cri))
9352 +                       cap = NULL;
9353 +       } else {
9354 +               /*
9355 +                * No requested driver; select based on crid flags.
9356 +                */
9357 +               cap = crypto_select_driver(cri, crid);
9358 +               /*
9359 +                * if NULL then can't do everything in one session.
9360 +                * XXX Fix this. We need to inject a "virtual" session
9361 +                * XXX layer right about here.
9362 +                */
9363 +       }
9364 +       if (cap != NULL) {
9365 +               /* Call the driver initialization routine. */
9366 +               hid = cap - crypto_drivers;
9367 +               lid = hid;              /* Pass the driver ID. */
9368 +               cap->cc_sessions++;
9369 +               CRYPTO_DRIVER_UNLOCK();
9370 +               err = CRYPTODEV_NEWSESSION(cap->cc_dev, &lid, cri);
9371 +               CRYPTO_DRIVER_LOCK();
9372 +               if (err == 0) {
9373 +                       (*sid) = (cap->cc_flags & 0xff000000)
9374 +                              | (hid & 0x00ffffff);
9375 +                       (*sid) <<= 32;
9376 +                       (*sid) |= (lid & 0xffffffff);
9377 +               } else
9378 +                       cap->cc_sessions--;
9379 +       } else
9380 +               err = EINVAL;
9381 +       CRYPTO_DRIVER_UNLOCK();
9382 +       return err;
9383 +}
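The 64-bit session id constructed above packs the driver capability flags into the top
byte, the driver table index (hid) into the remaining bits of the upper word, and the
driver-local session id (lid) into the lower word.  The real accessor macros
(CRYPTO_SESID2HID, CRYPTO_SESID2CAPS) live in cryptodev.h, which is not part of this hunk;
the helpers below are only a hypothetical sketch of that unpacking:

/* hypothetical helpers mirroring the packing done in crypto_newsession() */
static inline u_int32_t example_sesid2hid(u_int64_t sid)
{
	return (sid >> 32) & 0x00ffffff;	/* index into crypto_drivers[] */
}

static inline u_int32_t example_sesid2caps(u_int64_t sid)
{
	return (sid >> 32) & 0xff000000;	/* CRYPTOCAP_F_* flags of the driver */
}

static inline u_int32_t example_sesid2lid(u_int64_t sid)
{
	return (u_int32_t)(sid & 0xffffffff);	/* driver-local session id */
}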
9384 +
9385 +static void
9386 +crypto_remove(struct cryptocap *cap)
9387 +{
9388 +       CRYPTO_DRIVER_ASSERT();
9389 +       if (cap->cc_sessions == 0 && cap->cc_koperations == 0)
9390 +               bzero(cap, sizeof(*cap));
9391 +}
9392 +
9393 +/*
9394 + * Delete an existing session (or a reserved session on an unregistered
9395 + * driver).
9396 + */
9397 +int
9398 +crypto_freesession(u_int64_t sid)
9399 +{
9400 +       struct cryptocap *cap;
9401 +       u_int32_t hid;
9402 +       int err = 0;
9403 +       unsigned long d_flags;
9404 +
9405 +       dprintk("%s()\n", __FUNCTION__);
9406 +       CRYPTO_DRIVER_LOCK();
9407 +
9408 +       if (crypto_drivers == NULL) {
9409 +               err = EINVAL;
9410 +               goto done;
9411 +       }
9412 +
9413 +       /* Determine the driver ID from the session ID. */
9414 +       hid = CRYPTO_SESID2HID(sid);
9415 +
9416 +       if (hid >= crypto_drivers_num) {
9417 +               dprintk("%s - INVALID DRIVER NUM %d\n", __FUNCTION__, hid);
9418 +               err = ENOENT;
9419 +               goto done;
9420 +       }
9421 +       cap = &crypto_drivers[hid];
9422 +
9423 +       if (cap->cc_dev) {
9424 +               CRYPTO_DRIVER_UNLOCK();
9425 +               /* Call the driver cleanup routine, if available, unlocked. */
9426 +               err = CRYPTODEV_FREESESSION(cap->cc_dev, sid);
9427 +               CRYPTO_DRIVER_LOCK();
9428 +       }
9429 +
9430 +       if (cap->cc_sessions)
9431 +               cap->cc_sessions--;
9432 +
9433 +       if (cap->cc_flags & CRYPTOCAP_F_CLEANUP)
9434 +               crypto_remove(cap);
9435 +
9436 +done:
9437 +       CRYPTO_DRIVER_UNLOCK();
9438 +       return err;
9439 +}
9440 +
9441 +/*
9442 + * Return an unused driver id.  Used by drivers prior to registering
9443 + * support for the algorithms they handle.
9444 + */
9445 +int32_t
9446 +crypto_get_driverid(device_t dev, int flags)
9447 +{
9448 +       struct cryptocap *newdrv;
9449 +       int i;
9450 +       unsigned long d_flags;
9451 +
9452 +       if ((flags & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
9453 +               printf("%s: no flags specified when registering driver\n",
9454 +                   device_get_nameunit(dev));
9455 +               return -1;
9456 +       }
9457 +
9458 +       CRYPTO_DRIVER_LOCK();
9459 +
9460 +       for (i = 0; i < crypto_drivers_num; i++) {
9461 +               if (crypto_drivers[i].cc_dev == NULL &&
9462 +                   (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP) == 0) {
9463 +                       break;
9464 +               }
9465 +       }
9466 +
9467 +       /* Out of entries, allocate some more. */
9468 +       if (i == crypto_drivers_num) {
9469 +               /* Be careful about wrap-around. */
9470 +               if (2 * crypto_drivers_num <= crypto_drivers_num) {
9471 +                       CRYPTO_DRIVER_UNLOCK();
9472 +                       printk("crypto: driver count wraparound!\n");
9473 +                       return -1;
9474 +               }
9475 +
9476 +               newdrv = kmalloc(2 * crypto_drivers_num * sizeof(struct cryptocap),
9477 +                               GFP_KERNEL);
9478 +               if (newdrv == NULL) {
9479 +                       CRYPTO_DRIVER_UNLOCK();
9480 +                       printk("crypto: no space to expand driver table!\n");
9481 +                       return -1;
9482 +               }
9483 +
9484 +               memcpy(newdrv, crypto_drivers,
9485 +                               crypto_drivers_num * sizeof(struct cryptocap));
9486 +               memset(&newdrv[crypto_drivers_num], 0,
9487 +                               crypto_drivers_num * sizeof(struct cryptocap));
9488 +
9489 +               crypto_drivers_num *= 2;
9490 +
9491 +               kfree(crypto_drivers);
9492 +               crypto_drivers = newdrv;
9493 +       }
9494 +
9495 +       /* NB: state is zero'd on free */
9496 +       crypto_drivers[i].cc_sessions = 1;      /* Mark */
9497 +       crypto_drivers[i].cc_dev = dev;
9498 +       crypto_drivers[i].cc_flags = flags;
9499 +       if (bootverbose)
9500 +               printf("crypto: assign %s driver id %u, flags %u\n",
9501 +                   device_get_nameunit(dev), i, flags);
9502 +
9503 +       CRYPTO_DRIVER_UNLOCK();
9504 +
9505 +       return i;
9506 +}
9507 +
9508 +/*
9509 + * Lookup a driver by name.  We match against the full device
9510 + * name and unit, and against just the name.  The latter gives
9511 + * us a simple wildcarding by device name.  On success return the
9512 + * driver/hardware identifier; otherwise return -1.
9513 + */
9514 +int
9515 +crypto_find_driver(const char *match)
9516 +{
9517 +       int i, len = strlen(match);
9518 +       unsigned long d_flags;
9519 +
9520 +       CRYPTO_DRIVER_LOCK();
9521 +       for (i = 0; i < crypto_drivers_num; i++) {
9522 +               device_t dev = crypto_drivers[i].cc_dev;
9523 +               if (dev == NULL ||
9524 +                   (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP))
9525 +                       continue;
9526 +               if (strncmp(match, device_get_nameunit(dev), len) == 0 ||
9527 +                   strncmp(match, device_get_name(dev), len) == 0)
9528 +                       break;
9529 +       }
9530 +       CRYPTO_DRIVER_UNLOCK();
9531 +       return i < crypto_drivers_num ? i : -1;
9532 +}
9533 +
9534 +/*
9535 + * Return the device_t for the specified driver or NULL
9536 + * if the driver identifier is invalid.
9537 + */
9538 +device_t
9539 +crypto_find_device_byhid(int hid)
9540 +{
9541 +       struct cryptocap *cap = crypto_checkdriver(hid);
9542 +       return cap != NULL ? cap->cc_dev : NULL;
9543 +}
9544 +
9545 +/*
9546 + * Return the device/driver capabilities.
9547 + */
9548 +int
9549 +crypto_getcaps(int hid)
9550 +{
9551 +       struct cryptocap *cap = crypto_checkdriver(hid);
9552 +       return cap != NULL ? cap->cc_flags : 0;
9553 +}
9554 +
9555 +/*
9556 + * Register support for a key-related algorithm.  This routine
9557 + * is called once for each algorithm supported by a driver.
9558 + */
9559 +int
9560 +crypto_kregister(u_int32_t driverid, int kalg, u_int32_t flags)
9561 +{
9562 +       struct cryptocap *cap;
9563 +       int err;
9564 +       unsigned long d_flags;
9565 +
9566 +       dprintk("%s()\n", __FUNCTION__);
9567 +       CRYPTO_DRIVER_LOCK();
9568 +
9569 +       cap = crypto_checkdriver(driverid);
9570 +       if (cap != NULL &&
9571 +           (CRK_ALGORITM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) {
9572 +               /*
9573 +                * XXX Do some performance testing to determine placing.
9574 +                * XXX We probably need an auxiliary data structure that
9575 +                * XXX describes relative performances.
9576 +                */
9577 +
9578 +               cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
9579 +               if (bootverbose)
9580 +                       printf("crypto: %s registers key alg %u flags %u\n"
9581 +                               , device_get_nameunit(cap->cc_dev)
9582 +                               , kalg
9583 +                               , flags
9584 +                       );
9585 +               err = 0;
9586 +       } else
9587 +               err = EINVAL;
9588 +
9589 +       CRYPTO_DRIVER_UNLOCK();
9590 +       return err;
9591 +}
9592 +
9593 +/*
9594 + * Register support for a non-key-related algorithm.  This routine
9595 + * is called once for each such algorithm supported by a driver.
9596 + */
9597 +int
9598 +crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen,
9599 +    u_int32_t flags)
9600 +{
9601 +       struct cryptocap *cap;
9602 +       int err;
9603 +       unsigned long d_flags;
9604 +
9605 +       dprintk("%s(id=0x%x, alg=%d, maxoplen=%d, flags=0x%x)\n", __FUNCTION__,
9606 +                       driverid, alg, maxoplen, flags);
9607 +
9608 +       CRYPTO_DRIVER_LOCK();
9609 +
9610 +       cap = crypto_checkdriver(driverid);
9611 +       /* NB: algorithms are in the range [1..max] */
9612 +       if (cap != NULL &&
9613 +           (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX)) {
9614 +               /*
9615 +                * XXX Do some performance testing to determine placing.
9616 +                * XXX We probably need an auxiliary data structure that
9617 +                * XXX describes relative performances.
9618 +                */
9619 +
9620 +               cap->cc_alg[alg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
9621 +               cap->cc_max_op_len[alg] = maxoplen;
9622 +               if (bootverbose)
9623 +                       printf("crypto: %s registers alg %u flags %u maxoplen %u\n"
9624 +                               , device_get_nameunit(cap->cc_dev)
9625 +                               , alg
9626 +                               , flags
9627 +                               , maxoplen
9628 +                       );
9629 +               cap->cc_sessions = 0;           /* Unmark */
9630 +               err = 0;
9631 +       } else
9632 +               err = EINVAL;
9633 +
9634 +       CRYPTO_DRIVER_UNLOCK();
9635 +       return err;
9636 +}
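Taken together, crypto_get_driverid() and crypto_register() define the attach-time
sequence a driver follows, and crypto_unregister_all() below is its detach-time
counterpart.  A hedged sketch of that sequence is shown here; the CRYPTO_* algorithm
constants are assumed from cryptodev.h, and example_attach/example_detach are
hypothetical, not part of the patch:

static int32_t example_attach(device_t dev)
{
	int32_t cid;

	/* reserve a slot in crypto_drivers[] */
	cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE);
	if (cid < 0)
		return -1;

	/* one call per algorithm the hardware can handle */
	crypto_register(cid, CRYPTO_DES_CBC, 0, 0);
	crypto_register(cid, CRYPTO_3DES_CBC, 0, 0);
	crypto_register(cid, CRYPTO_SHA1_HMAC, 0, 0);
	return cid;
}

static void example_detach(int32_t cid)
{
	/* drops all algorithms; pending sessions are marked for cleanup */
	crypto_unregister_all(cid);
}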
9637 +
9638 +static void
9639 +driver_finis(struct cryptocap *cap)
9640 +{
9641 +       u_int32_t ses, kops;
9642 +
9643 +       CRYPTO_DRIVER_ASSERT();
9644 +
9645 +       ses = cap->cc_sessions;
9646 +       kops = cap->cc_koperations;
9647 +       bzero(cap, sizeof(*cap));
9648 +       if (ses != 0 || kops != 0) {
9649 +               /*
9650 +                * If there are pending sessions,
9651 +                * just mark as invalid.
9652 +                */
9653 +               cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
9654 +               cap->cc_sessions = ses;
9655 +               cap->cc_koperations = kops;
9656 +       }
9657 +}
9658 +
9659 +/*
9660 + * Unregister a crypto driver. If there are pending sessions using it,
9661 + * leave enough information around so that subsequent calls using those
9662 + * sessions will correctly detect the driver has been unregistered and
9663 + * reroute requests.
9664 + */
9665 +int
9666 +crypto_unregister(u_int32_t driverid, int alg)
9667 +{
9668 +       struct cryptocap *cap;
9669 +       int i, err;
9670 +       unsigned long d_flags;
9671 +
9672 +       dprintk("%s()\n", __FUNCTION__);
9673 +       CRYPTO_DRIVER_LOCK();
9674 +
9675 +       cap = crypto_checkdriver(driverid);
9676 +       if (cap != NULL &&
9677 +           (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX) &&
9678 +           cap->cc_alg[alg] != 0) {
9679 +               cap->cc_alg[alg] = 0;
9680 +               cap->cc_max_op_len[alg] = 0;
9681 +
9682 +               /* Was this the last algorithm ? */
9683 +               for (i = 1; i <= CRYPTO_ALGORITHM_MAX; i++)
9684 +                       if (cap->cc_alg[i] != 0)
9685 +                               break;
9686 +
9687 +               if (i == CRYPTO_ALGORITHM_MAX + 1)
9688 +                       driver_finis(cap);
9689 +               err = 0;
9690 +       } else
9691 +               err = EINVAL;
9692 +       CRYPTO_DRIVER_UNLOCK();
9693 +       return err;
9694 +}
9695 +
9696 +/*
9697 + * Unregister all algorithms associated with a crypto driver.
9698 + * If there are pending sessions using it, leave enough information
9699 + * around so that subsequent calls using those sessions will
9700 + * correctly detect the driver has been unregistered and reroute
9701 + * requests.
9702 + */
9703 +int
9704 +crypto_unregister_all(u_int32_t driverid)
9705 +{
9706 +       struct cryptocap *cap;
9707 +       int err;
9708 +       unsigned long d_flags;
9709 +
9710 +       dprintk("%s()\n", __FUNCTION__);
9711 +       CRYPTO_DRIVER_LOCK();
9712 +       cap = crypto_checkdriver(driverid);
9713 +       if (cap != NULL) {
9714 +               driver_finis(cap);
9715 +               err = 0;
9716 +       } else
9717 +               err = EINVAL;
9718 +       CRYPTO_DRIVER_UNLOCK();
9719 +
9720 +       return err;
9721 +}
9722 +
9723 +/*
9724 + * Clear blockage on a driver.  The what parameter indicates whether
9725 + * the driver is now ready for cryptop's and/or cryptokop's.
9726 + */
9727 +int
9728 +crypto_unblock(u_int32_t driverid, int what)
9729 +{
9730 +       struct cryptocap *cap;
9731 +       int err;
9732 +       unsigned long q_flags;
9733 +
9734 +       CRYPTO_Q_LOCK();
9735 +       cap = crypto_checkdriver(driverid);
9736 +       if (cap != NULL) {
9737 +               if (what & CRYPTO_SYMQ) {
9738 +                       cap->cc_qblocked = 0;
9739 +                       crypto_all_qblocked = 0;
9740 +               }
9741 +               if (what & CRYPTO_ASYMQ) {
9742 +                       cap->cc_kqblocked = 0;
9743 +                       crypto_all_kqblocked = 0;
9744 +               }
9745 +               if (crp_sleep)
9746 +                       wake_up_interruptible(&cryptoproc_wait);
9747 +               err = 0;
9748 +       } else
9749 +               err = EINVAL;
9750 +       CRYPTO_Q_UNLOCK(); //DAVIDM should this be a driver lock
9751 +
9752 +       return err;
9753 +}
9754 +
9755 +/*
9756 + * Add a crypto request to a queue, to be processed by the kernel thread.
9757 + */
9758 +int
9759 +crypto_dispatch(struct cryptop *crp)
9760 +{
9761 +       struct cryptocap *cap;
9762 +       int result = -1;
9763 +       unsigned long q_flags;
9764 +
9765 +       dprintk("%s()\n", __FUNCTION__);
9766 +
9767 +       cryptostats.cs_ops++;
9768 +
9769 +       CRYPTO_Q_LOCK();
9770 +       if (crypto_q_cnt >= crypto_q_max) {
9771 +               CRYPTO_Q_UNLOCK();
9772 +               cryptostats.cs_drops++;
9773 +               return ENOMEM;
9774 +       }
9775 +       crypto_q_cnt++;
9776 +
9777 +       /*
9778 +        * Caller marked the request to be processed immediately; dispatch
9779 +        * it directly to the driver unless the driver is currently blocked.
9780 +        */
9781 +       if ((crp->crp_flags & CRYPTO_F_BATCH) == 0) {
9782 +               int hid = CRYPTO_SESID2HID(crp->crp_sid);
9783 +               cap = crypto_checkdriver(hid);
9784 +               /* Driver cannot disappear when there is an active session. */
9785 +               KASSERT(cap != NULL, ("%s: Driver disappeared.", __func__));
9786 +               if (!cap->cc_qblocked) {
9787 +                       crypto_all_qblocked = 0;
9788 +                       crypto_drivers[hid].cc_qblocked = 1;
9789 +                       CRYPTO_Q_UNLOCK();
9790 +                       result = crypto_invoke(cap, crp, 0);
9791 +                       CRYPTO_Q_LOCK();
9792 +                       if (result != ERESTART)
9793 +                               crypto_drivers[hid].cc_qblocked = 0;
9794 +               }
9795 +       }
9796 +       if (result == ERESTART) {
9797 +               /*
9798 +                * The driver ran out of resources, mark the
9799 +                * driver ``blocked'' for cryptop's and put
9800 +                * the request back in the queue.  It would
9801 +                * the request back in the queue.  It would be
9802 +                * best to put the request back where we got
9803 +                * at the front.  This should be ok; putting
9804 +                * it at the end does not work.
9805 +                */
9806 +               list_add(&crp->crp_next, &crp_q);
9807 +               cryptostats.cs_blocks++;
9808 +       } else if (result == -1) {
9809 +               TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
9810 +       }
9811 +       if (crp_sleep)
9812 +               wake_up_interruptible(&cryptoproc_wait);
9813 +       CRYPTO_Q_UNLOCK();
9814 +       return 0;
9815 +}
9816 +
9817 +/*
9818 + * Add an asymmetric crypto request to a queue,
9819 + * to be processed by the kernel thread.
9820 + */
9821 +int
9822 +crypto_kdispatch(struct cryptkop *krp)
9823 +{
9824 +       int error;
9825 +       unsigned long q_flags;
9826 +
9827 +       cryptostats.cs_kops++;
9828 +
9829 +       error = crypto_kinvoke(krp, krp->krp_crid);
9830 +       if (error == ERESTART) {
9831 +               CRYPTO_Q_LOCK();
9832 +               TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next);
9833 +               if (crp_sleep)
9834 +                       wake_up_interruptible(&cryptoproc_wait);
9835 +               CRYPTO_Q_UNLOCK();
9836 +               error = 0;
9837 +       }
9838 +       return error;
9839 +}
9840 +
9841 +/*
9842 + * Verify a driver is suitable for the specified operation.
9843 + */
9844 +static __inline int
9845 +kdriver_suitable(const struct cryptocap *cap, const struct cryptkop *krp)
9846 +{
9847 +       return (cap->cc_kalg[krp->krp_op] & CRYPTO_ALG_FLAG_SUPPORTED) != 0;
9848 +}
9849 +
9850 +/*
9851 + * Select a driver for an asym operation.  The driver must
9852 + * support the necessary algorithm.  The caller can constrain
9853 + * which device is selected with the flags parameter.  The
9854 + * algorithm we use here is pretty stupid; just use the first
9855 + * driver that supports the algorithms we need. If there are
9856 + * multiple suitable drivers we choose the driver with the
9857 + * fewest active operations.  We prefer hardware-backed
9858 + * drivers to software ones when either may be used.
9859 + */
9860 +static struct cryptocap *
9861 +crypto_select_kdriver(const struct cryptkop *krp, int flags)
9862 +{
9863 +       struct cryptocap *cap, *best, *blocked;
9864 +       int match, hid;
9865 +
9866 +       CRYPTO_DRIVER_ASSERT();
9867 +
9868 +       /*
9869 +        * Look first for hardware crypto devices if permitted.
9870 +        */
9871 +       if (flags & CRYPTOCAP_F_HARDWARE)
9872 +               match = CRYPTOCAP_F_HARDWARE;
9873 +       else
9874 +               match = CRYPTOCAP_F_SOFTWARE;
9875 +       best = NULL;
9876 +       blocked = NULL;
9877 +again:
9878 +       for (hid = 0; hid < crypto_drivers_num; hid++) {
9879 +               cap = &crypto_drivers[hid];
9880 +               /*
9881 +                * If it's not initialized, is in the process of
9882 +                * going away, or is not appropriate (hardware
9883 +                * or software based on match), then skip.
9884 +                */
9885 +               if (cap->cc_dev == NULL ||
9886 +                   (cap->cc_flags & CRYPTOCAP_F_CLEANUP) ||
9887 +                   (cap->cc_flags & match) == 0)
9888 +                       continue;
9889 +
9890 +               /* verify all the algorithms are supported. */
9891 +               if (kdriver_suitable(cap, krp)) {
9892 +                       if (best == NULL ||
9893 +                           cap->cc_koperations < best->cc_koperations)
9894 +                               best = cap;
9895 +               }
9896 +       }
9897 +       if (best != NULL)
9898 +               return best;
9899 +       if (match == CRYPTOCAP_F_HARDWARE && (flags & CRYPTOCAP_F_SOFTWARE)) {
9900 +               /* sort of an Algol 68-style for loop */
9901 +               match = CRYPTOCAP_F_SOFTWARE;
9902 +               goto again;
9903 +       }
9904 +       return best;
9905 +}
9906 +
9907 +/*
9908 + * Dispatch an asymmetric crypto request.
9909 + */
9910 +static int
9911 +crypto_kinvoke(struct cryptkop *krp, int crid)
9912 +{
9913 +       struct cryptocap *cap = NULL;
9914 +       int error;
9915 +       unsigned long d_flags;
9916 +
9917 +       KASSERT(krp != NULL, ("%s: krp == NULL", __func__));
9918 +       KASSERT(krp->krp_callback != NULL,
9919 +           ("%s: krp->crp_callback == NULL", __func__));
9920 +
9921 +       CRYPTO_DRIVER_LOCK();
9922 +       if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
9923 +               cap = crypto_checkdriver(crid);
9924 +               if (cap != NULL) {
9925 +                       /*
9926 +                        * Driver present, it must support the necessary
9927 +                        * algorithm and, if s/w drivers are excluded,
9928 +                        * it must be registered as hardware-backed.
9929 +                        */
9930 +                       if (!kdriver_suitable(cap, krp) ||
9931 +                           (!crypto_devallowsoft &&
9932 +                            (cap->cc_flags & CRYPTOCAP_F_HARDWARE) == 0))
9933 +                               cap = NULL;
9934 +               }
9935 +       } else {
9936 +               /*
9937 +                * No requested driver; select based on crid flags.
9938 +                */
9939 +               if (!crypto_devallowsoft)       /* NB: disallow s/w drivers */
9940 +                       crid &= ~CRYPTOCAP_F_SOFTWARE;
9941 +               cap = crypto_select_kdriver(krp, crid);
9942 +       }
9943 +       if (cap != NULL && !cap->cc_kqblocked) {
9944 +               krp->krp_hid = cap - crypto_drivers;
9945 +               cap->cc_koperations++;
9946 +               CRYPTO_DRIVER_UNLOCK();
9947 +               error = CRYPTODEV_KPROCESS(cap->cc_dev, krp, 0);
9948 +               CRYPTO_DRIVER_LOCK();
9949 +               if (error == ERESTART) {
9950 +                       cap->cc_koperations--;
9951 +                       CRYPTO_DRIVER_UNLOCK();
9952 +                       return (error);
9953 +               }
9954 +               /* return the actual device used */
9955 +               krp->krp_crid = krp->krp_hid;
9956 +       } else {
9957 +               /*
9958 +                * NB: cap is !NULL if device is blocked; in
9959 +                *     that case return ERESTART so the operation
9960 +                *     is resubmitted if possible.
9961 +                */
9962 +               error = (cap == NULL) ? ENODEV : ERESTART;
9963 +       }
9964 +       CRYPTO_DRIVER_UNLOCK();
9965 +
9966 +       if (error) {
9967 +               krp->krp_status = error;
9968 +               crypto_kdone(krp);
9969 +       }
9970 +       return 0;
9971 +}
9972 +
9973 +
9974 +/*
9975 + * Dispatch a crypto request to the appropriate crypto devices.
9976 + */
9977 +static int
9978 +crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint)
9979 +{
9980 +       KASSERT(crp != NULL, ("%s: crp == NULL", __func__));
9981 +       KASSERT(crp->crp_callback != NULL,
9982 +           ("%s: crp->crp_callback == NULL", __func__));
9983 +       KASSERT(crp->crp_desc != NULL, ("%s: crp->crp_desc == NULL", __func__));
9984 +
9985 +       dprintk("%s()\n", __FUNCTION__);
9986 +
9987 +#ifdef CRYPTO_TIMING
9988 +       if (crypto_timing)
9989 +               crypto_tstat(&cryptostats.cs_invoke, &crp->crp_tstamp);
9990 +#endif
9991 +       if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) {
9992 +               struct cryptodesc *crd;
9993 +               u_int64_t nid;
9994 +
9995 +               /*
9996 +                * Driver has unregistered; migrate the session and return
9997 +                * an error to the caller so they'll resubmit the op.
9998 +                *
9999 +                * XXX: What if there are more already queued requests for this
10000 +                *      session?
10001 +                */
10002 +               crypto_freesession(crp->crp_sid);
10003 +
10004 +               for (crd = crp->crp_desc; crd->crd_next; crd = crd->crd_next)
10005 +                       crd->CRD_INI.cri_next = &(crd->crd_next->CRD_INI);
10006 +
10007 +               /* XXX propagate flags from initial session? */
10008 +               if (crypto_newsession(&nid, &(crp->crp_desc->CRD_INI),
10009 +                   CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE) == 0)
10010 +                       crp->crp_sid = nid;
10011 +
10012 +               crp->crp_etype = EAGAIN;
10013 +               crypto_done(crp);
10014 +               return 0;
10015 +       } else {
10016 +               /*
10017 +                * Invoke the driver to process the request.
10018 +                */
10019 +               return CRYPTODEV_PROCESS(cap->cc_dev, crp, hint);
10020 +       }
10021 +}
10022 +
10023 +/*
10024 + * Release a set of crypto descriptors.
10025 + */
10026 +void
10027 +crypto_freereq(struct cryptop *crp)
10028 +{
10029 +       struct cryptodesc *crd;
10030 +
10031 +       if (crp == NULL)
10032 +               return;
10033 +
10034 +#ifdef DIAGNOSTIC
10035 +       {
10036 +               struct cryptop *crp2;
10037 +               unsigned long q_flags;
10038 +
10039 +               CRYPTO_Q_LOCK();
10040 +               TAILQ_FOREACH(crp2, &crp_q, crp_next) {
10041 +                       KASSERT(crp2 != crp,
10042 +                           ("Freeing cryptop from the crypto queue (%p).",
10043 +                           crp));
10044 +               }
10045 +               CRYPTO_Q_UNLOCK();
10046 +               CRYPTO_RETQ_LOCK();
10047 +               TAILQ_FOREACH(crp2, &crp_ret_q, crp_next) {
10048 +                       KASSERT(crp2 != crp,
10049 +                           ("Freeing cryptop from the return queue (%p).",
10050 +                           crp));
10051 +               }
10052 +               CRYPTO_RETQ_UNLOCK();
10053 +       }
10054 +#endif
10055 +
10056 +       while ((crd = crp->crp_desc) != NULL) {
10057 +               crp->crp_desc = crd->crd_next;
10058 +               kmem_cache_free(cryptodesc_zone, crd);
10059 +       }
10060 +       kmem_cache_free(cryptop_zone, crp);
10061 +}
10062 +
10063 +/*
10064 + * Acquire a set of crypto descriptors.
10065 + */
10066 +struct cryptop *
10067 +crypto_getreq(int num)
10068 +{
10069 +       struct cryptodesc *crd;
10070 +       struct cryptop *crp;
10071 +
10072 +       crp = kmem_cache_alloc(cryptop_zone, SLAB_ATOMIC);
10073 +       if (crp != NULL) {
10074 +               memset(crp, 0, sizeof(*crp));
10075 +               INIT_LIST_HEAD(&crp->crp_next);
10076 +               init_waitqueue_head(&crp->crp_waitq);
10077 +               while (num--) {
10078 +                       crd = kmem_cache_alloc(cryptodesc_zone, SLAB_ATOMIC);
10079 +                       if (crd == NULL) {
10080 +                               crypto_freereq(crp);
10081 +                               return NULL;
10082 +                       }
10083 +                       memset(crd, 0, sizeof(*crd));
10084 +                       crd->crd_next = crp->crp_desc;
10085 +                       crp->crp_desc = crd;
10086 +               }
10087 +       }
10088 +       return crp;
10089 +}
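A hedged sketch of how a consumer ties crypto_getreq(), crypto_dispatch() and
crypto_freereq() together.  The struct cryptop / cryptodesc field names (crp_sid,
crp_buf, crp_desc, crd_alg, ...) and the CRD_F_ENCRYPT / CRYPTO_DES_CBC constants are
assumed from cryptodev.h, which is not shown in this hunk; example_cb and
example_encrypt are hypothetical:

static int example_cb(struct cryptop *crp)
{
	if (crp->crp_etype != 0)
		printk("crypto request failed: %d\n", crp->crp_etype);
	crypto_freereq(crp);
	return 0;
}

static int example_encrypt(u_int64_t sid, caddr_t buf, int len)
{
	struct cryptop *crp;
	struct cryptodesc *crd;

	crp = crypto_getreq(1);			/* one descriptor */
	if (crp == NULL)
		return ENOMEM;

	crp->crp_sid = sid;			/* from crypto_newsession() */
	crp->crp_ilen = len;
	crp->crp_buf = buf;
	crp->crp_flags = CRYPTO_F_CBIMM;	/* call back from driver context */
	crp->crp_callback = example_cb;

	crd = crp->crp_desc;
	crd->crd_alg = CRYPTO_DES_CBC;
	crd->crd_flags = CRD_F_ENCRYPT;
	crd->crd_skip = 0;
	crd->crd_len = len;

	return crypto_dispatch(crp);
}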
10090 +
10091 +/*
10092 + * Invoke the callback on behalf of the driver.
10093 + */
10094 +void
10095 +crypto_done(struct cryptop *crp)
10096 +{
10097 +       unsigned long q_flags;
10098 +
10099 +       dprintk("%s()\n", __FUNCTION__);
10100 +       if ((crp->crp_flags & CRYPTO_F_DONE) == 0) {
10101 +               crp->crp_flags |= CRYPTO_F_DONE;
10102 +               CRYPTO_Q_LOCK();
10103 +               crypto_q_cnt--;
10104 +               CRYPTO_Q_UNLOCK();
10105 +       } else
10106 +               printk("crypto: crypto_done op already done, flags 0x%x",
10107 +                               crp->crp_flags);
10108 +       if (crp->crp_etype != 0)
10109 +               cryptostats.cs_errs++;
10110 +       /*
10111 +        * CBIMM means unconditionally do the callback immediately;
10112 +        * CBIFSYNC means do the callback immediately only if the
10113 +        * operation was done synchronously.  Both are used to avoid
10114 +        * doing extraneous context switches; the latter is mostly
10115 +        * used with the software crypto driver.
10116 +        */
10117 +       if ((crp->crp_flags & CRYPTO_F_CBIMM) ||
10118 +           ((crp->crp_flags & CRYPTO_F_CBIFSYNC) &&
10119 +            (CRYPTO_SESID2CAPS(crp->crp_sid) & CRYPTOCAP_F_SYNC))) {
10120 +               /*
10121 +                * Do the callback directly.  This is ok when the
10122 +                * callback routine does very little (e.g. the
10123 +                * /dev/crypto callback method just does a wakeup).
10124 +                */
10125 +               crp->crp_callback(crp);
10126 +       } else {
10127 +               unsigned long r_flags;
10128 +               /*
10129 +                * Normal case; queue the callback for the thread.
10130 +                */
10131 +               CRYPTO_RETQ_LOCK();
10132 +               if (CRYPTO_RETQ_EMPTY())
10133 +                       wake_up_interruptible(&cryptoretproc_wait);/* shared wait channel */
10134 +               TAILQ_INSERT_TAIL(&crp_ret_q, crp, crp_next);
10135 +               CRYPTO_RETQ_UNLOCK();
10136 +       }
10137 +}
10138 +
10139 +/*
10140 + * Invoke the callback on behalf of the driver.
10141 + */
10142 +void
10143 +crypto_kdone(struct cryptkop *krp)
10144 +{
10145 +       struct cryptocap *cap;
10146 +       unsigned long d_flags;
10147 +
10148 +       if ((krp->krp_flags & CRYPTO_KF_DONE) != 0)
10149 +               printk("crypto: crypto_kdone op already done, flags 0x%x",
10150 +                               krp->krp_flags);
10151 +       krp->krp_flags |= CRYPTO_KF_DONE;
10152 +       if (krp->krp_status != 0)
10153 +               cryptostats.cs_kerrs++;
10154 +
10155 +       CRYPTO_DRIVER_LOCK();
10156 +       /* XXX: What if driver is loaded in the meantime? */
10157 +       if (krp->krp_hid < crypto_drivers_num) {
10158 +               cap = &crypto_drivers[krp->krp_hid];
10159 +               cap->cc_koperations--;
10160 +               KASSERT(cap->cc_koperations >= 0, ("cc_koperations < 0"));
10161 +               if (cap->cc_flags & CRYPTOCAP_F_CLEANUP)
10162 +                       crypto_remove(cap);
10163 +       }
10164 +       CRYPTO_DRIVER_UNLOCK();
10165 +
10166 +       /*
10167 +        * CBIMM means unconditionally do the callback immediately;
10168 +        * this is used to avoid doing extraneous context switches.
10169 +        */
10170 +       if ((krp->krp_flags & CRYPTO_KF_CBIMM)) {
10171 +               /*
10172 +                * Do the callback directly.  This is ok when the
10173 +                * callback routine does very little (e.g. the
10174 +                * /dev/crypto callback method just does a wakeup).
10175 +                */
10176 +               krp->krp_callback(krp);
10177 +       } else {
10178 +               unsigned long r_flags;
10179 +               /*
10180 +                * Normal case; queue the callback for the thread.
10181 +                */
10182 +               CRYPTO_RETQ_LOCK();
10183 +               if (CRYPTO_RETQ_EMPTY())
10184 +                       wake_up_interruptible(&cryptoretproc_wait);/* shared wait channel */
10185 +               TAILQ_INSERT_TAIL(&crp_ret_kq, krp, krp_next);
10186 +               CRYPTO_RETQ_UNLOCK();
10187 +       }
10188 +}
10189 +
10190 +int
10191 +crypto_getfeat(int *featp)
10192 +{
10193 +       int hid, kalg, feat = 0;
10194 +       unsigned long d_flags;
10195 +
10196 +       CRYPTO_DRIVER_LOCK();
10197 +       for (hid = 0; hid < crypto_drivers_num; hid++) {
10198 +               const struct cryptocap *cap = &crypto_drivers[hid];
10199 +
10200 +               if ((cap->cc_flags & CRYPTOCAP_F_SOFTWARE) &&
10201 +                   !crypto_devallowsoft) {
10202 +                       continue;
10203 +               }
10204 +               for (kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++)
10205 +                       if (cap->cc_kalg[kalg] & CRYPTO_ALG_FLAG_SUPPORTED)
10206 +                               feat |=  1 << kalg;
10207 +       }
10208 +       CRYPTO_DRIVER_UNLOCK();
10209 +       *featp = feat;
10210 +       return (0);
10211 +}
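A hedged sketch of how the feature mask built by crypto_getfeat() is usually consumed;
CRK_MOD_EXP is one of the CRK_* asymmetric-operation indices assumed from cryptodev.h,
and example_have_modexp is hypothetical:

static int example_have_modexp(void)
{
	int feat = 0;

	crypto_getfeat(&feat);
	/* each registered key algorithm sets its bit in the mask */
	return (feat & (1 << CRK_MOD_EXP)) != 0;
}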
10212 +
10213 +/*
10214 + * Crypto thread, dispatches crypto requests.
10215 + */
10216 +static int
10217 +crypto_proc(void *arg)
10218 +{
10219 +       struct cryptop *crp, *submit;
10220 +       struct cryptkop *krp, *krpp;
10221 +       struct cryptocap *cap;
10222 +       u_int32_t hid;
10223 +       int result, hint;
10224 +       unsigned long q_flags;
10225 +
10226 +       ocf_daemonize("crypto");
10227 +
10228 +       CRYPTO_Q_LOCK();
10229 +       for (;;) {
10230 +               /*
10231 +                * We need to make sure we don't get into a busy loop with nothing
10232 +                * to do; the two crypto_all_*blocked vars tell us when every driver
10233 +                * and queue is full and nothing can be done.  If so we
10234 +                * wait for an unblock.
10235 +                */
10236 +               crypto_all_qblocked  = !list_empty(&crp_q);
10237 +
10238 +               /*
10239 +                * Find the first element in the queue that can be
10240 +                * processed and look-ahead to see if multiple ops
10241 +                * are ready for the same driver.
10242 +                */
10243 +               submit = NULL;
10244 +               hint = 0;
10245 +               list_for_each_entry(crp, &crp_q, crp_next) {
10246 +                       hid = CRYPTO_SESID2HID(crp->crp_sid);
10247 +                       cap = crypto_checkdriver(hid);
10248 +                       /*
10249 +                        * Driver cannot disappear when there is an active
10250 +                        * session.
10251 +                        */
10252 +                       KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
10253 +                           __func__, __LINE__));
10254 +                       if (cap == NULL || cap->cc_dev == NULL) {
10255 +                               /* Op needs to be migrated, process it. */
10256 +                               if (submit == NULL)
10257 +                                       submit = crp;
10258 +                               break;
10259 +                       }
10260 +                       if (!cap->cc_qblocked) {
10261 +                               if (submit != NULL) {
10262 +                                       /*
10263 +                                        * We stop on finding another op,
10264 +                                        * regardless whether its for the same
10265 +                                        * driver or not.  We could keep
10266 +                                        * searching the queue but it might be
10267 +                                        * better to just use a per-driver
10268 +                                        * queue instead.
10269 +                                        */
10270 +                                       if (CRYPTO_SESID2HID(submit->crp_sid) == hid)
10271 +                                               hint = CRYPTO_HINT_MORE;
10272 +                                       break;
10273 +                               } else {
10274 +                                       submit = crp;
10275 +                                       if ((submit->crp_flags & CRYPTO_F_BATCH) == 0)
10276 +                                               break;
10277 +                                       /* keep scanning in case more are queued */
10278 +                               }
10279 +                       }
10280 +               }
10281 +               if (submit != NULL) {
10282 +                       hid = CRYPTO_SESID2HID(submit->crp_sid);
10283 +                       crypto_all_qblocked = 0;
10284 +                       list_del(&submit->crp_next);
10285 +                       crypto_drivers[hid].cc_qblocked = 1;
10286 +                       cap = crypto_checkdriver(hid);
10287 +                       CRYPTO_Q_UNLOCK();
10288 +                       KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
10289 +                           __func__, __LINE__));
10290 +                       result = crypto_invoke(cap, submit, hint);
10291 +                       CRYPTO_Q_LOCK();
10292 +                       if (result == ERESTART) {
10293 +                               /*
10294 +                                * The driver ran out of resources, mark the
10295 +                                * driver ``blocked'' for cryptop's and put
10296 +                                * the request back in the queue.  It would be
10297 +                                * best to put the request back where we got
10298 +                                * it but that's hard so for now we put it
10299 +                                * at the front.  This should be ok; putting
10300 +                                * it at the end does not work.
10301 +                                */
10302 +                               /* XXX validate sid again? */
10303 +                               list_add(&submit->crp_next, &crp_q);
10304 +                               cryptostats.cs_blocks++;
10305 +                       } else
10306 +                               crypto_drivers[hid].cc_qblocked=0;
10307 +               }
10308 +
10309 +               crypto_all_kqblocked = !list_empty(&crp_kq);
10310 +
10311 +               /* As above, but for key ops */
10312 +               krp = NULL;
10313 +               list_for_each_entry(krpp, &crp_kq, krp_next) {
10314 +                       cap = crypto_checkdriver(krpp->krp_hid);
10315 +                       if (cap == NULL || cap->cc_dev == NULL) {
10316 +                               /*
10317 +                                * Operation needs to be migrated, invalidate
10318 +                                * the assigned device so it will reselect a
10319 +                                * new one below.  Propagate the original
10320 +                                * crid selection flags if supplied.
10321 +                                */
10322 +                               krpp->krp_hid = krpp->krp_crid &
10323 +                                   (CRYPTOCAP_F_SOFTWARE|CRYPTOCAP_F_HARDWARE);
10324 +                               if (krpp->krp_hid == 0)
10325 +                                       krpp->krp_hid =
10326 +                                   CRYPTOCAP_F_SOFTWARE|CRYPTOCAP_F_HARDWARE;
10327 +                               break;
10328 +                       }
10329 +                       if (!cap->cc_kqblocked) {
10330 +                               krp = krpp;
10331 +                               break;
10332 +                       }
10333 +               }
10334 +               if (krp != NULL) {
10335 +                       crypto_all_kqblocked = 0;
10336 +                       list_del(&krp->krp_next);
10337 +                       crypto_drivers[krp->krp_hid].cc_kqblocked = 1;
10338 +                       CRYPTO_Q_UNLOCK();
10339 +                       result = crypto_kinvoke(krp, krp->krp_hid);
10340 +                       CRYPTO_Q_LOCK();
10341 +                       if (result == ERESTART) {
10342 +                               /*
10343 +                                * The driver ran out of resources, mark the
10344 +                                * driver ``blocked'' for cryptkop's and put
10345 +                                * the request back in the queue.  It would
10346 +                                * be best to put the request back where we got
10347 +                                * it but that's hard so for now we put it
10348 +                                * at the front.  This should be ok; putting
10349 +                                * it at the end does not work.
10350 +                                */
10351 +                               /* XXX validate sid again? */
10352 +                               list_add(&krp->krp_next, &crp_kq);
10353 +                               cryptostats.cs_kblocks++;
10354 +                       } else
10355 +                               crypto_drivers[krp->krp_hid].cc_kqblocked = 0;
10356 +               }
10357 +
10358 +               if (submit == NULL && krp == NULL) {
10359 +                       /*
10360 +                        * Nothing more to be processed.  Sleep until we're
10361 +                        * woken because there are more ops to process.
10362 +                        * This happens either by submission or by a driver
10363 +                        * becoming unblocked and notifying us through
10364 +                        * crypto_unblock.  Note that when we wakeup we
10365 +                        * start processing each queue again from the
10366 +                        * front. It's not clear that it's important to
10367 +                        * preserve this ordering since ops may finish
10368 +                        * out of order if dispatched to different devices
10369 +                        * and some become blocked while others do not.
10370 +                        */
10371 +                       dprintk("%s - sleeping (qe=%d qb=%d kqe=%d kqb=%d)\n",
10372 +                                       __FUNCTION__,
10373 +                                       list_empty(&crp_q), crypto_all_qblocked,
10374 +                                       list_empty(&crp_kq), crypto_all_kqblocked);
10375 +                       CRYPTO_Q_UNLOCK();
10376 +                       crp_sleep = 1;
10377 +                       wait_event_interruptible(cryptoproc_wait,
10378 +                                       !(list_empty(&crp_q) || crypto_all_qblocked) ||
10379 +                                       !(list_empty(&crp_kq) || crypto_all_kqblocked) ||
10380 +                                       cryptoproc == (pid_t) -1);
10381 +                       crp_sleep = 0;
10382 +                       if (signal_pending (current)) {
10383 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
10384 +                               spin_lock_irq(&current->sigmask_lock);
10385 +#endif
10386 +                               flush_signals(current);
10387 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
10388 +                               spin_unlock_irq(&current->sigmask_lock);
10389 +#endif
10390 +                       }
10391 +                       CRYPTO_Q_LOCK();
10392 +                       dprintk("%s - awake\n", __FUNCTION__);
10393 +                       if (cryptoproc == (pid_t) -1)
10394 +                               break;
10395 +                       cryptostats.cs_intrs++;
10396 +               }
10397 +       }
10398 +       CRYPTO_Q_UNLOCK();
10399 +       complete_and_exit(&cryptoproc_exited, 0);
10400 +}
10401 +
10402 +/*
10403 + * Crypto returns thread, does callbacks for processed crypto requests.
10404 + * Callbacks are done here, rather than in the crypto drivers, because
10405 + * callbacks typically are expensive and would slow interrupt handling.
10406 + */
10407 +static int
10408 +crypto_ret_proc(void *arg)
10409 +{
10410 +       struct cryptop *crpt;
10411 +       struct cryptkop *krpt;
10412 +       unsigned long  r_flags;
10413 +
10414 +       ocf_daemonize("crypto_ret");
10415 +
10416 +       CRYPTO_RETQ_LOCK();
10417 +       for (;;) {
10418 +               /* Harvest return q's for completed ops */
10419 +               crpt = NULL;
10420 +               if (!list_empty(&crp_ret_q))
10421 +                       crpt = list_entry(crp_ret_q.next, typeof(*crpt), crp_next);
10422 +               if (crpt != NULL)
10423 +                       list_del(&crpt->crp_next);
10424 +
10425 +               krpt = NULL;
10426 +               if (!list_empty(&crp_ret_kq))
10427 +                       krpt = list_entry(crp_ret_kq.next, typeof(*krpt), krp_next);
10428 +               if (krpt != NULL)
10429 +                       list_del(&krpt->krp_next);
10430 +
10431 +               if (crpt != NULL || krpt != NULL) {
10432 +                       CRYPTO_RETQ_UNLOCK();
10433 +                       /*
10434 +                        * Run callbacks unlocked.
10435 +                        */
10436 +                       if (crpt != NULL)
10437 +                               crpt->crp_callback(crpt);
10438 +                       if (krpt != NULL)
10439 +                               krpt->krp_callback(krpt);
10440 +                       CRYPTO_RETQ_LOCK();
10441 +               } else {
10442 +                       /*
10443 +                        * Nothing more to be processed.  Sleep until we're
10444 +                        * woken because there are more returns to process.
10445 +                        */
10446 +                       dprintk("%s - sleeping\n", __FUNCTION__);
10447 +                       CRYPTO_RETQ_UNLOCK();
10448 +                       wait_event_interruptible(cryptoretproc_wait,
10449 +                                       cryptoretproc == (pid_t) -1 ||
10450 +                                       !list_empty(&crp_ret_q) ||
10451 +                                       !list_empty(&crp_ret_kq));
10452 +                       if (signal_pending (current)) {
10453 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
10454 +                               spin_lock_irq(&current->sigmask_lock);
10455 +#endif
10456 +                               flush_signals(current);
10457 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
10458 +                               spin_unlock_irq(&current->sigmask_lock);
10459 +#endif
10460 +                       }
10461 +                       CRYPTO_RETQ_LOCK();
10462 +                       dprintk("%s - awake\n", __FUNCTION__);
10463 +                       if (cryptoretproc == (pid_t) -1) {
10464 +                               dprintk("%s - EXITING!\n", __FUNCTION__);
10465 +                               break;
10466 +                       }
10467 +                       cryptostats.cs_rets++;
10468 +               }
10469 +       }
10470 +       CRYPTO_RETQ_UNLOCK();
10471 +       complete_and_exit(&cryptoretproc_exited, 0);
10472 +}
10473 +
10474 +
10475 +#if 0 /* should put this into /proc or something */
10476 +static void
10477 +db_show_drivers(void)
10478 +{
10479 +       int hid;
10480 +
10481 +       db_printf("%12s %4s %4s %8s %2s %2s\n"
10482 +               , "Device"
10483 +               , "Ses"
10484 +               , "Kops"
10485 +               , "Flags"
10486 +               , "QB"
10487 +               , "KB"
10488 +       );
10489 +       for (hid = 0; hid < crypto_drivers_num; hid++) {
10490 +               const struct cryptocap *cap = &crypto_drivers[hid];
10491 +               if (cap->cc_dev == NULL)
10492 +                       continue;
10493 +               db_printf("%-12s %4u %4u %08x %2u %2u\n"
10494 +                   , device_get_nameunit(cap->cc_dev)
10495 +                   , cap->cc_sessions
10496 +                   , cap->cc_koperations
10497 +                   , cap->cc_flags
10498 +                   , cap->cc_qblocked
10499 +                   , cap->cc_kqblocked
10500 +               );
10501 +       }
10502 +}
10503 +
10504 +DB_SHOW_COMMAND(crypto, db_show_crypto)
10505 +{
10506 +       struct cryptop *crp;
10507 +
10508 +       db_show_drivers();
10509 +       db_printf("\n");
10510 +
10511 +       db_printf("%4s %8s %4s %4s %4s %4s %8s %8s\n",
10512 +           "HID", "Caps", "Ilen", "Olen", "Etype", "Flags",
10513 +           "Desc", "Callback");
10514 +       TAILQ_FOREACH(crp, &crp_q, crp_next) {
10515 +               db_printf("%4u %08x %4u %4u %4u %04x %8p %8p\n"
10516 +                   , (int) CRYPTO_SESID2HID(crp->crp_sid)
10517 +                   , (int) CRYPTO_SESID2CAPS(crp->crp_sid)
10518 +                   , crp->crp_ilen, crp->crp_olen
10519 +                   , crp->crp_etype
10520 +                   , crp->crp_flags
10521 +                   , crp->crp_desc
10522 +                   , crp->crp_callback
10523 +               );
10524 +       }
10525 +       if (!TAILQ_EMPTY(&crp_ret_q)) {
10526 +               db_printf("\n%4s %4s %4s %8s\n",
10527 +                   "HID", "Etype", "Flags", "Callback");
10528 +               TAILQ_FOREACH(crp, &crp_ret_q, crp_next) {
10529 +                       db_printf("%4u %4u %04x %8p\n"
10530 +                           , (int) CRYPTO_SESID2HID(crp->crp_sid)
10531 +                           , crp->crp_etype
10532 +                           , crp->crp_flags
10533 +                           , crp->crp_callback
10534 +                       );
10535 +               }
10536 +       }
10537 +}
10538 +
10539 +DB_SHOW_COMMAND(kcrypto, db_show_kcrypto)
10540 +{
10541 +       struct cryptkop *krp;
10542 +
10543 +       db_show_drivers();
10544 +       db_printf("\n");
10545 +
10546 +       db_printf("%4s %5s %4s %4s %8s %4s %8s\n",
10547 +           "Op", "Status", "#IP", "#OP", "CRID", "HID", "Callback");
10548 +       TAILQ_FOREACH(krp, &crp_kq, krp_next) {
10549 +               db_printf("%4u %5u %4u %4u %08x %4u %8p\n"
10550 +                   , krp->krp_op
10551 +                   , krp->krp_status
10552 +                   , krp->krp_iparams, krp->krp_oparams
10553 +                   , krp->krp_crid, krp->krp_hid
10554 +                   , krp->krp_callback
10555 +               );
10556 +       }
10557 +       if (!TAILQ_EMPTY(&crp_ret_q)) {
10558 +               db_printf("%4s %5s %8s %4s %8s\n",
10559 +                   "Op", "Status", "CRID", "HID", "Callback");
10560 +               TAILQ_FOREACH(krp, &crp_ret_kq, krp_next) {
10561 +                       db_printf("%4u %5u %08x %4u %8p\n"
10562 +                           , krp->krp_op
10563 +                           , krp->krp_status
10564 +                           , krp->krp_crid, krp->krp_hid
10565 +                           , krp->krp_callback
10566 +                       );
10567 +               }
10568 +       }
10569 +}
10570 +#endif
10571 +
10572 +
10573 +static int
10574 +crypto_init(void)
10575 +{
10576 +       int error;
10577 +
10578 +       dprintk("%s(0x%x)\n", __FUNCTION__, (int) crypto_init);
10579 +
10580 +       if (crypto_initted)
10581 +               return 0;
10582 +       crypto_initted = 1;
10583 +
10584 +       spin_lock_init(&crypto_drivers_lock);
10585 +       spin_lock_init(&crypto_q_lock);
10586 +       spin_lock_init(&crypto_ret_q_lock);
10587 +
10588 +       cryptop_zone = kmem_cache_create("cryptop", sizeof(struct cryptop),
10589 +                                      0, SLAB_HWCACHE_ALIGN, NULL
10590 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
10591 +                                      , NULL
10592 +#endif
10593 +                                       );
10594 +
10595 +       cryptodesc_zone = kmem_cache_create("cryptodesc", sizeof(struct cryptodesc),
10596 +                                      0, SLAB_HWCACHE_ALIGN, NULL
10597 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
10598 +                                      , NULL
10599 +#endif
10600 +                                       );
10601 +
10602 +       if (cryptodesc_zone == NULL || cryptop_zone == NULL) {
10603 +               printk("crypto: crypto_init cannot set up crypto zones\n");
10604 +               error = ENOMEM;
10605 +               goto bad;
10606 +       }
10607 +
10608 +       crypto_drivers_num = CRYPTO_DRIVERS_INITIAL;
10609 +       crypto_drivers = kmalloc(crypto_drivers_num * sizeof(struct cryptocap),
10610 +                       GFP_KERNEL);
10611 +       if (crypto_drivers == NULL) {
10612 +               printk("crypto: crypto_init cannot set up crypto drivers\n");
10613 +               error = ENOMEM;
10614 +               goto bad;
10615 +       }
10616 +
10617 +       memset(crypto_drivers, 0, crypto_drivers_num * sizeof(struct cryptocap));
10618 +
10619 +       init_completion(&cryptoproc_exited);
10620 +       init_completion(&cryptoretproc_exited);
10621 +
10622 +       cryptoproc = 0; /* to avoid race condition where proc runs first */
10623 +       cryptoproc = kernel_thread(crypto_proc, NULL, CLONE_FS|CLONE_FILES);
10624 +       if (cryptoproc < 0) {
10625 +               error = cryptoproc;
10626 +               printk("crypto: crypto_init cannot start crypto thread; error %d\n",
10627 +                       error);
10628 +               goto bad;
10629 +       }
10630 +
10631 +       cryptoretproc = 0; /* to avoid race condition where proc runs first */
10632 +       cryptoretproc = kernel_thread(crypto_ret_proc, NULL, CLONE_FS|CLONE_FILES);
10633 +       if (cryptoretproc < 0) {
10634 +               error = cryptoretproc;
10635 +               printk("crypto: crypto_init cannot start cryptoret thread; error %d\n",
10636 +                               error);
10637 +               goto bad;
10638 +       }
10639 +
10640 +       return 0;
10641 +bad:
10642 +       crypto_exit();
10643 +       return error;
10644 +}
10645 +
10646 +
10647 +static void
10648 +crypto_exit(void)
10649 +{
10650 +       pid_t p;
10651 +       unsigned long d_flags;
10652 +
10653 +       dprintk("%s()\n", __FUNCTION__);
10654 +
10655 +       /*
10656 +        * Terminate any crypto threads.
10657 +        */
10658 +
10659 +       CRYPTO_DRIVER_LOCK();
10660 +       p = cryptoproc;
10661 +       cryptoproc = (pid_t) -1;
10662 +       kill_proc(p, SIGTERM, 1);
10663 +       wake_up_interruptible(&cryptoproc_wait);
10664 +       CRYPTO_DRIVER_UNLOCK();
10665 +
10666 +       wait_for_completion(&cryptoproc_exited);
10667 +
10668 +       CRYPTO_DRIVER_LOCK();
10669 +       p = cryptoretproc;
10670 +       cryptoretproc = (pid_t) -1;
10671 +       kill_proc(p, SIGTERM, 1);
10672 +       wake_up_interruptible(&cryptoretproc_wait);
10673 +       CRYPTO_DRIVER_UNLOCK();
10674 +
10675 +       wait_for_completion(&cryptoretproc_exited);
10676 +
10677 +       /* XXX flush queues??? */
10678 +
10679 +       /* 
10680 +        * Reclaim dynamically allocated resources.
10681 +        */
10682 +       if (crypto_drivers != NULL)
10683 +               kfree(crypto_drivers);
10684 +
10685 +       if (cryptodesc_zone != NULL)
10686 +               kmem_cache_destroy(cryptodesc_zone);
10687 +       if (cryptop_zone != NULL)
10688 +               kmem_cache_destroy(cryptop_zone);
10689 +}
10690 +
10691 +
10692 +EXPORT_SYMBOL(crypto_newsession);
10693 +EXPORT_SYMBOL(crypto_freesession);
10694 +EXPORT_SYMBOL(crypto_get_driverid);
10695 +EXPORT_SYMBOL(crypto_kregister);
10696 +EXPORT_SYMBOL(crypto_register);
10697 +EXPORT_SYMBOL(crypto_unregister);
10698 +EXPORT_SYMBOL(crypto_unregister_all);
10699 +EXPORT_SYMBOL(crypto_unblock);
10700 +EXPORT_SYMBOL(crypto_dispatch);
10701 +EXPORT_SYMBOL(crypto_kdispatch);
10702 +EXPORT_SYMBOL(crypto_freereq);
10703 +EXPORT_SYMBOL(crypto_getreq);
10704 +EXPORT_SYMBOL(crypto_done);
10705 +EXPORT_SYMBOL(crypto_kdone);
10706 +EXPORT_SYMBOL(crypto_getfeat);
10707 +EXPORT_SYMBOL(crypto_userasymcrypto);
10708 +EXPORT_SYMBOL(crypto_getcaps);
10709 +EXPORT_SYMBOL(crypto_find_driver);
10710 +EXPORT_SYMBOL(crypto_find_device_byhid);
10711 +
10712 +module_init(crypto_init);
10713 +module_exit(crypto_exit);
10714 +
10715 +MODULE_LICENSE("BSD");
10716 +MODULE_AUTHOR("David McCullough <david_mccullough@securecomputing.com>");
10717 +MODULE_DESCRIPTION("OCF (OpenBSD Cryptographic Framework)");
10718 --- /dev/null   2007-07-04 13:54:27.000000000 +1000
10719 +++ linux/crypto/ocf/criov.c    2007-07-18 13:01:47.000000000 +1000
10720 @@ -0,0 +1,215 @@
10721 +/*      $OpenBSD: criov.c,v 1.9 2002/01/29 15:48:29 jason Exp $        */
10722 +
10723 +/*
10724 + * Linux port done by David McCullough <david_mccullough@securecomputing.com>
10725 + * Copyright (C) 2006-2007 David McCullough
10726 + * Copyright (C) 2004-2005 Intel Corporation.
10727 + * The license and original author are listed below.
10728 + *
10729 + * Copyright (c) 1999 Theo de Raadt
10730 + *
10731 + * Redistribution and use in source and binary forms, with or without
10732 + * modification, are permitted provided that the following conditions
10733 + * are met:
10734 + *
10735 + * 1. Redistributions of source code must retain the above copyright
10736 + *   notice, this list of conditions and the following disclaimer.
10737 + * 2. Redistributions in binary form must reproduce the above copyright
10738 + *   notice, this list of conditions and the following disclaimer in the
10739 + *   documentation and/or other materials provided with the distribution.
10740 + * 3. The name of the author may not be used to endorse or promote products
10741 + *   derived from this software without specific prior written permission.
10742 + *
10743 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
10744 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
10745 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
10746 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
10747 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
10748 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
10749 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
10750 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
10751 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
10752 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
10753 + *
10754 +__FBSDID("$FreeBSD: src/sys/opencrypto/criov.c,v 1.5 2006/06/04 22:15:13 pjd Exp $");
10755 + */
10756 +
10757 +#ifndef AUTOCONF_INCLUDED
10758 +#include <linux/config.h>
10759 +#endif
10760 +#include <linux/module.h>
10761 +#include <linux/init.h>
10762 +#include <linux/slab.h>
10763 +#include <linux/uio.h>
10764 +#include <linux/skbuff.h>
10765 +#include <linux/kernel.h>
10766 +#include <linux/mm.h>
10767 +#include <asm/io.h>
10768 +
10769 +#include <uio.h>
10770 +#include <cryptodev.h>
10771 +
10772 +/*
10773 + * This macro is only for avoiding code duplication, as we need to skip
10774 + * a given number of bytes in the same way in the three functions below.
10775 + */
10776 +#define        CUIO_SKIP()     do {                                            \
10777 +       KASSERT(off >= 0, ("%s: off %d < 0", __func__, off));           \
10778 +       KASSERT(len >= 0, ("%s: len %d < 0", __func__, len));           \
10779 +       while (off > 0) {                                               \
10780 +               KASSERT(iol >= 0, ("%s: empty in skip", __func__));     \
10781 +               if (off < iov->iov_len)                                 \
10782 +                       break;                                          \
10783 +               off -= iov->iov_len;                                    \
10784 +               iol--;                                                  \
10785 +               iov++;                                                  \
10786 +       }                                                               \
10787 +} while (0)
10788 +
10789 +void
10790 +cuio_copydata(struct uio* uio, int off, int len, caddr_t cp)
10791 +{
10792 +       struct iovec *iov = uio->uio_iov;
10793 +       int iol = uio->uio_iovcnt;
10794 +       unsigned count;
10795 +
10796 +       CUIO_SKIP();
10797 +       while (len > 0) {
10798 +               KASSERT(iol >= 0, ("%s: empty", __func__));
10799 +               count = min((int)(iov->iov_len - off), len);
10800 +               memcpy(cp, ((caddr_t)iov->iov_base) + off, count);
10801 +               len -= count;
10802 +               cp += count;
10803 +               off = 0;
10804 +               iol--;
10805 +               iov++;
10806 +       }
10807 +}
10808 +
10809 +void
10810 +cuio_copyback(struct uio* uio, int off, int len, caddr_t cp)
10811 +{
10812 +       struct iovec *iov = uio->uio_iov;
10813 +       int iol = uio->uio_iovcnt;
10814 +       unsigned count;
10815 +
10816 +       CUIO_SKIP();
10817 +       while (len > 0) {
10818 +               KASSERT(iol >= 0, ("%s: empty", __func__));
10819 +               count = min((int)(iov->iov_len - off), len);
10820 +               memcpy(((caddr_t)iov->iov_base) + off, cp, count);
10821 +               len -= count;
10822 +               cp += count;
10823 +               off = 0;
10824 +               iol--;
10825 +               iov++;
10826 +       }
10827 +}
10828 +
10829 +/*
10830 + * Return a pointer to iov/offset of location in iovec list.
10831 + */
10832 +struct iovec *
10833 +cuio_getptr(struct uio *uio, int loc, int *off)
10834 +{
10835 +       struct iovec *iov = uio->uio_iov;
10836 +       int iol = uio->uio_iovcnt;
10837 +
10838 +       while (loc >= 0) {
10839 +               /* Normal end of search */
10840 +               if (loc < iov->iov_len) {
10841 +                       *off = loc;
10842 +                       return (iov);
10843 +               }
10844 +
10845 +               loc -= iov->iov_len;
10846 +               if (iol == 0) {
10847 +                       if (loc == 0) {
10848 +                               /* Point at the end of valid data */
10849 +                               *off = iov->iov_len;
10850 +                               return (iov);
10851 +                       } else
10852 +                               return (NULL);
10853 +               } else {
10854 +                       iov++, iol--;
10855 +               }
10856 +       }
10857 +
10858 +       return (NULL);
10859 +}
10860 +
10861 +EXPORT_SYMBOL(cuio_copyback);
10862 +EXPORT_SYMBOL(cuio_copydata);
10863 +EXPORT_SYMBOL(cuio_getptr);
10864 +
10865 +
10866 +static void
10867 +skb_copy_bits_back(struct sk_buff *skb, int offset, caddr_t cp, int len)
10868 +{
10869 +       int i;
10870 +       if (offset < skb_headlen(skb)) {
10871 +               memcpy(skb->data + offset, cp, min_t(int, skb_headlen(skb), len));
10872 +               len -= skb_headlen(skb);
10873 +               cp += skb_headlen(skb);
10874 +       }
10875 +       offset -= skb_headlen(skb);
10876 +       for (i = 0; len > 0 && i < skb_shinfo(skb)->nr_frags; i++) {
10877 +               if (offset < skb_shinfo(skb)->frags[i].size) {
10878 +                       memcpy(page_address(skb_shinfo(skb)->frags[i].page) +
10879 +                                       skb_shinfo(skb)->frags[i].page_offset,
10880 +                                       cp, min_t(int, skb_shinfo(skb)->frags[i].size, len));
10881 +                       len -= skb_shinfo(skb)->frags[i].size;
10882 +                       cp += skb_shinfo(skb)->frags[i].size;
10883 +               }
10884 +               offset -= skb_shinfo(skb)->frags[i].size;
10885 +       }
10886 +}
10887 +
10888 +void
10889 +crypto_copyback(int flags, caddr_t buf, int off, int size, caddr_t in)
10890 +{
10891 +
10892 +       if ((flags & CRYPTO_F_SKBUF) != 0)
10893 +               skb_copy_bits_back((struct sk_buff *)buf, off, in, size);
10894 +       else if ((flags & CRYPTO_F_IOV) != 0)
10895 +               cuio_copyback((struct uio *)buf, off, size, in);
10896 +       else
10897 +               bcopy(in, buf + off, size);
10898 +}
10899 +
10900 +void
10901 +crypto_copydata(int flags, caddr_t buf, int off, int size, caddr_t out)
10902 +{
10903 +
10904 +       if ((flags & CRYPTO_F_SKBUF) != 0)
10905 +               skb_copy_bits((struct sk_buff *)buf, off, out, size);
10906 +       else if ((flags & CRYPTO_F_IOV) != 0)
10907 +               cuio_copydata((struct uio *)buf, off, size, out);
10908 +       else
10909 +               bcopy(buf + off, out, size);
10910 +}
10911 +
10912 +int
10913 +crypto_apply(int flags, caddr_t buf, int off, int len,
10914 +    int (*f)(void *, void *, u_int), void *arg)
10915 +{
10916 +#if 0
10917 +       int error;
10918 +
10919 +       if ((flags & CRYPTO_F_SKBUF) != 0)
10920 +               error = XXXXXX((struct mbuf *)buf, off, len, f, arg);
10921 +       else if ((flags & CRYPTO_F_IOV) != 0)
10922 +               error = cuio_apply((struct uio *)buf, off, len, f, arg);
10923 +       else
10924 +               error = (*f)(arg, buf + off, len);
10925 +       return (error);
10926 +#else
10927 +       KASSERT(0, ("crypto_apply not implemented!\n"));
10928 +#endif
10929 +       return 0;
10930 +}
10931 +
10932 +EXPORT_SYMBOL(crypto_copyback);
10933 +EXPORT_SYMBOL(crypto_copydata);
10934 +EXPORT_SYMBOL(crypto_apply);
10935 +
10936 --- /dev/null   2007-07-04 13:54:27.000000000 +1000
10937 +++ linux/crypto/ocf/uio.h      2007-07-03 09:52:33.000000000 +1000
10938 @@ -0,0 +1,54 @@
10939 +#ifndef _OCF_UIO_H_
10940 +#define _OCF_UIO_H_
10941 +
10942 +#include <linux/uio.h>
10943 +
10944 +/*
10945 + * The linux uio.h doesn't have all we need.  To be fully API compatible
10946 + * with the BSD cryptodev, we need to keep this around.  Perhaps this can
10947 + * be moved back into linux/uio.h.
10948 + *
10949 + * Linux port done by David McCullough <david_mccullough@securecomputing.com>
10950 + * Copyright (C) 2006-2007 David McCullough
10951 + * Copyright (C) 2004-2005 Intel Corporation.
10952 + *
10953 + * LICENSE TERMS
10954 + *
10955 + * The free distribution and use of this software in both source and binary
10956 + * form is allowed (with or without changes) provided that:
10957 + *
10958 + *   1. distributions of this source code include the above copyright
10959 + *      notice, this list of conditions and the following disclaimer;
10960 + *
10961 + *   2. distributions in binary form include the above copyright
10962 + *      notice, this list of conditions and the following disclaimer
10963 + *      in the documentation and/or other associated materials;
10964 + *
10965 + *   3. the copyright holder's name is not used to endorse products
10966 + *      built using this software without specific written permission.
10967 + *
10968 + * ALTERNATIVELY, provided that this notice is retained in full, this product
10969 + * may be distributed under the terms of the GNU General Public License (GPL),
10970 + * in which case the provisions of the GPL apply INSTEAD OF those given above.
10971 + *
10972 + * DISCLAIMER
10973 + *
10974 + * This software is provided 'as is' with no explicit or implied warranties
10975 + * in respect of its properties, including, but not limited to, correctness
10976 + * and/or fitness for purpose.
10977 + * ---------------------------------------------------------------------------
10978 + */
10979 +
10980 +struct uio {
10981 +       struct  iovec *uio_iov;
10982 +       int             uio_iovcnt;
10983 +       off_t   uio_offset;
10984 +       int             uio_resid;
10985 +#if 0
10986 +       enum    uio_seg uio_segflg;
10987 +       enum    uio_rw uio_rw;
10988 +       struct  thread *uio_td;
10989 +#endif
10990 +};
10991 +
10992 +#endif
10993 --- /dev/null   2007-07-04 13:54:27.000000000 +1000
10994 +++ linux/crypto/ocf/talitos/talitos.c  2008-04-01 15:48:31.000000000 +1000
10995 @@ -0,0 +1,1359 @@
10996 +/*
10997 + * crypto/ocf/talitos/talitos.c
10998 + *
10999 + * An OCF-Linux module that uses Freescale's SEC to do the crypto.
11000 + * Based on crypto/ocf/hifn and crypto/ocf/safe OCF drivers
11001 + *
11002 + * Copyright (c) 2006 Freescale Semiconductor, Inc.
11003 + *
11004 + * This code written by Kim A. B. Phillips <kim.phillips@freescale.com>
11005 + * some code copied from files with the following:
11006 + * Copyright (C) 2004-2007 David McCullough <david_mccullough@securecomputing.com>
11007 + *
11008 + * Redistribution and use in source and binary forms, with or without
11009 + * modification, are permitted provided that the following conditions
11010 + * are met:
11011 + *
11012 + * 1. Redistributions of source code must retain the above copyright
11013 + *    notice, this list of conditions and the following disclaimer.
11014 + * 2. Redistributions in binary form must reproduce the above copyright
11015 + *    notice, this list of conditions and the following disclaimer in the
11016 + *    documentation and/or other materials provided with the distribution.
11017 + * 3. The name of the author may not be used to endorse or promote products
11018 + *    derived from this software without specific prior written permission.
11019 + *
11020 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
11021 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
11022 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
11023 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
11024 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
11025 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
11026 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
11027 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
11028 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
11029 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
11030 + *
11031 + * ---------------------------------------------------------------------------
11032 + *
11033 + * NOTES:
11034 + *
11035 + * The Freescale SEC (also known as 'talitos') resides on the
11036 + * internal bus, and runs asynchronous to the processor core.  It has
11037 + * a wide gamut of cryptographic acceleration features, including single-
11038 + * pass IPsec (also known as algorithm chaining).  To properly utilize 
11039 + * all of the SEC's performance enhancing features, further reworking 
11040 + * of higher level code (framework, applications) will be necessary.
11041 + *
11042 + * The following table shows which SEC version is present in which devices:
11043 + * 
11044 + * Devices       SEC version
11045 + *
11046 + * 8272, 8248    SEC 1.0
11047 + * 885, 875      SEC 1.2
11048 + * 8555E, 8541E  SEC 2.0
11049 + * 8349E         SEC 2.01
11050 + * 8548E         SEC 2.1
11051 + *
11052 + * The following table shows the features offered by each SEC version:
11053 + *
11054 + *                            Max.   chan-
11055 + * version  Bus I/F       Clock  nels  DEU AESU AFEU MDEU PKEU RNG KEU
11056 + *
11057 + * SEC 1.0  internal 64b  100MHz   4     1    1    1    1    1   1   0
11058 + * SEC 1.2  internal 32b   66MHz   1     1    1    0    1    0   0   0
11059 + * SEC 2.0  internal 64b  166MHz   4     1    1    1    1    1   1   0
11060 + * SEC 2.01 internal 64b  166MHz   4     1    1    1    1    1   1   0
11061 + * SEC 2.1  internal 64b  333MHz   4     1    1    1    1    1   1   1
11062 + *
11063 + * Each execution unit in the SEC has two modes of execution: channel and
11064 + * slave/debug.  This driver employs the channel infrastructure in the
11065 + * device for convenience.  Only the RNG is directly accessed due to the
11066 + * convenience of its random fifo pool.  The relationship between the
11067 + * channels and execution units is depicted in the following diagram:
11068 + *
11069 + *    -------   ------------
11070 + * ---| ch0 |---|          |
11071 + *    -------   |          |
11072 + *              |          |------+-------+-------+-------+------------
11073 + *    -------   |          |      |       |       |       |           |
11074 + * ---| ch1 |---|          |      |       |       |       |           |
11075 + *    -------   |          |   ------  ------  ------  ------      ------
11076 + *              |controller|   |DEU |  |AESU|  |MDEU|  |PKEU| ...  |RNG |
11077 + *    -------   |          |   ------  ------  ------  ------      ------
11078 + * ---| ch2 |---|          |      |       |       |       |           |
11079 + *    -------   |          |      |       |       |       |           |
11080 + *              |          |------+-------+-------+-------+------------
11081 + *    -------   |          |
11082 + * ---| ch3 |---|          |
11083 + *    -------   ------------
11084 + *
11085 + * Channel ch0 may drive an aes operation to the aes unit (AESU),
11086 + * and, at the same time, ch1 may drive a message digest operation
11087 + * to the mdeu. Each channel has an input descriptor FIFO, and the 
11088 + * FIFO can contain, e.g. on the 8541E, up to 24 entries, before
11089 + * a buffer overrun error is triggered. The controller is responsible
11090 + * for fetching the data from descriptor pointers, and passing the 
11091 + * data to the appropriate EUs. The controller also writes the 
11092 + * cryptographic operation's result to memory. The SEC notifies 
11093 + * completion by triggering an interrupt and/or setting the 1st byte 
11094 + * of the hdr field to 0xff.
11095 + *
11096 + * TODO:
11097 + * o support more algorithms
11098 + * o support more versions of the SEC
11099 + * o add support for linux 2.4
11100 + * o scatter-gather (sg) support
11101 + * o add support for public key ops (PKEU)
11102 + * o add statistics
11103 + */
11104 +
11105 +#ifndef AUTOCONF_INCLUDED
11106 +#include <linux/config.h>
11107 +#endif
11108 +#include <linux/module.h>
11109 +#include <linux/init.h>
11110 +#include <linux/interrupt.h>
11111 +#include <linux/spinlock.h>
11112 +#include <linux/random.h>
11113 +#include <linux/skbuff.h>
11114 +#include <asm/scatterlist.h>
11115 +#include <linux/dma-mapping.h>  /* dma_map_single() */
11116 +#include <linux/moduleparam.h>
11117 +
11118 +#include <linux/version.h>
11119 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
11120 +#include <linux/platform_device.h>
11121 +#endif
11122 +
11123 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
11124 +#include <linux/of_platform.h>
11125 +#endif
11126 +
11127 +#include <cryptodev.h>
11128 +#include <uio.h>
11129 +
11130 +#define DRV_NAME "talitos" 
11131 +
11132 +#include "talitos_dev.h"
11133 +#include "talitos_soft.h"
11134 +
11135 +#define read_random(p,l) get_random_bytes(p,l)
11136 +
11137 +const char talitos_driver_name[] = "Talitos OCF";
11138 +const char talitos_driver_version[] = "0.2";
11139 +
11140 +static int talitos_newsession(device_t dev, u_int32_t *sidp,
11141 +                                                               struct cryptoini *cri);
11142 +static int talitos_freesession(device_t dev, u_int64_t tid);
11143 +static int talitos_process(device_t dev, struct cryptop *crp, int hint);
11144 +static void dump_talitos_status(struct talitos_softc *sc);
11145 +static int talitos_submit(struct talitos_softc *sc, struct talitos_desc *td, 
11146 +                                                               int chsel);
11147 +static void talitos_doneprocessing(struct talitos_softc *sc);
11148 +static void talitos_init_device(struct talitos_softc *sc);
11149 +static void talitos_reset_device_master(struct talitos_softc *sc);
11150 +static void talitos_reset_device(struct talitos_softc *sc);
11151 +static void talitos_errorprocessing(struct talitos_softc *sc);
11152 +#ifdef CONFIG_PPC_MERGE
11153 +static int talitos_probe(struct of_device *ofdev, const struct of_device_id *match);
11154 +static int talitos_remove(struct of_device *ofdev);
11155 +#else
11156 +static int talitos_probe(struct platform_device *pdev);
11157 +static int talitos_remove(struct platform_device *pdev);
11158 +#endif
11159 +#ifdef CONFIG_OCF_RANDOMHARVEST
11160 +static int talitos_read_random(void *arg, u_int32_t *buf, int maxwords);
11161 +static void talitos_rng_init(struct talitos_softc *sc);
11162 +#endif
11163 +
11164 +static device_method_t talitos_methods = {
11165 +       /* crypto device methods */
11166 +       DEVMETHOD(cryptodev_newsession, talitos_newsession),
11167 +       DEVMETHOD(cryptodev_freesession, talitos_freesession),
11168 +       DEVMETHOD(cryptodev_process,    talitos_process),
11169 +};
11170 +
11171 +#define debug talitos_debug
11172 +int talitos_debug = 0;
11173 +module_param(talitos_debug, int, 0644);
11174 +MODULE_PARM_DESC(talitos_debug, "Enable debug");
11175 +
11176 +static inline void talitos_write(volatile unsigned *addr, u32 val)
11177 +{
11178 +        out_be32(addr, val);
11179 +}
11180 +
11181 +static inline u32 talitos_read(volatile unsigned *addr)
11182 +{
11183 +        u32 val;
11184 +        val = in_be32(addr);
11185 +        return val;
11186 +}
11187 +
11188 +static void dump_talitos_status(struct talitos_softc *sc)
11189 +{
11190 +       unsigned int v, v_hi, i, *ptr;
11191 +       v = talitos_read(sc->sc_base_addr + TALITOS_MCR);
11192 +       v_hi = talitos_read(sc->sc_base_addr + TALITOS_MCR_HI);
11193 +       printk(KERN_INFO "%s: MCR          0x%08x_%08x\n",
11194 +                       device_get_nameunit(sc->sc_cdev), v, v_hi);
11195 +       v = talitos_read(sc->sc_base_addr + TALITOS_IMR);
11196 +       v_hi = talitos_read(sc->sc_base_addr + TALITOS_IMR_HI);
11197 +       printk(KERN_INFO "%s: IMR          0x%08x_%08x\n",
11198 +                       device_get_nameunit(sc->sc_cdev), v, v_hi);
11199 +       v = talitos_read(sc->sc_base_addr + TALITOS_ISR);
11200 +       v_hi = talitos_read(sc->sc_base_addr + TALITOS_ISR_HI);
11201 +       printk(KERN_INFO "%s: ISR          0x%08x_%08x\n",
11202 +                       device_get_nameunit(sc->sc_cdev), v, v_hi);
11203 +       for (i = 0; i < sc->sc_num_channels; i++) { 
11204 +               v = talitos_read(sc->sc_base_addr + i*TALITOS_CH_OFFSET + 
11205 +                       TALITOS_CH_CDPR);
11206 +               v_hi = talitos_read(sc->sc_base_addr + i*TALITOS_CH_OFFSET + 
11207 +                       TALITOS_CH_CDPR_HI);
11208 +               printk(KERN_INFO "%s: CDPR     ch%d 0x%08x_%08x\n", 
11209 +                               device_get_nameunit(sc->sc_cdev), i, v, v_hi);
11210 +       }
11211 +       for (i = 0; i < sc->sc_num_channels; i++) { 
11212 +               v = talitos_read(sc->sc_base_addr + i*TALITOS_CH_OFFSET + 
11213 +                       TALITOS_CH_CCPSR);
11214 +               v_hi = talitos_read(sc->sc_base_addr + i*TALITOS_CH_OFFSET + 
11215 +                       TALITOS_CH_CCPSR_HI);
11216 +               printk(KERN_INFO "%s: CCPSR    ch%d 0x%08x_%08x\n", 
11217 +                               device_get_nameunit(sc->sc_cdev), i, v, v_hi);
11218 +       }
11219 +       ptr = sc->sc_base_addr + TALITOS_CH_DESCBUF;
11220 +       for (i = 0; i < 16; i++) { 
11221 +               v = talitos_read(ptr++); v_hi = talitos_read(ptr++);
11222 +               printk(KERN_INFO "%s: DESCBUF  ch0 0x%08x_%08x (tdp%02d)\n", 
11223 +                               device_get_nameunit(sc->sc_cdev), v, v_hi, i);
11224 +       }
11225 +       return;
11226 +}
11227 +
11228 +
11229 +#ifdef CONFIG_OCF_RANDOMHARVEST
11230 +/* 
11231 + * pull random numbers off the RNG FIFO, not exceeding the amount available
11232 + */
11233 +static int
11234 +talitos_read_random(void *arg, u_int32_t *buf, int maxwords)
11235 +{
11236 +       struct talitos_softc *sc = (struct talitos_softc *) arg;
11237 +       int rc;
11238 +       u_int32_t v;
11239 +
11240 +       DPRINTF("%s()\n", __FUNCTION__);
11241 +
11242 +       /* check for things like FIFO underflow */
11243 +       v = talitos_read(sc->sc_base_addr + TALITOS_RNGISR_HI);
11244 +       if (unlikely(v)) {
11245 +               printk(KERN_ERR "%s: RNGISR_HI error %08x\n",
11246 +                               device_get_nameunit(sc->sc_cdev), v);
11247 +               return 0;
11248 +       }
11249 +       /*
11250 +        * OFL is the number of available 64-bit words,
11251 +        * shift and convert to a 32-bit word count
11252 +        */
11253 +       v = talitos_read(sc->sc_base_addr + TALITOS_RNGSR_HI);
11254 +       v = (v & TALITOS_RNGSR_HI_OFL) >> (16 - 1);
11255 +       if (maxwords > v)
11256 +               maxwords = v;
11257 +       for (rc = 0; rc < maxwords; rc++) {
11258 +               buf[rc] = talitos_read(sc->sc_base_addr + 
11259 +                       TALITOS_RNG_FIFO + rc*sizeof(u_int32_t));
11260 +       }
11261 +       if (maxwords & 1) {
11262 +               /* 
11263 +                * RNG will complain with an AE in the RNGISR
11264 +                * if we don't complete the pairs of 32-bit reads
11265 +                * to its 64-bit register based FIFO
11266 +                */
11267 +               v = talitos_read(sc->sc_base_addr + 
11268 +                       TALITOS_RNG_FIFO + rc*sizeof(u_int32_t));
11269 +       }
11270 +
11271 +       return rc;
11272 +}
11273 +
11274 +static void
11275 +talitos_rng_init(struct talitos_softc *sc)
11276 +{
11277 +       u_int32_t v;
11278 +
11279 +       DPRINTF("%s()\n", __FUNCTION__);
11280 +       /* reset RNG EU */
11281 +       v = talitos_read(sc->sc_base_addr + TALITOS_RNGRCR_HI);
11282 +       v |= TALITOS_RNGRCR_HI_SR;
11283 +       talitos_write(sc->sc_base_addr + TALITOS_RNGRCR_HI, v);
11284 +       while ((talitos_read(sc->sc_base_addr + TALITOS_RNGSR_HI) 
11285 +               & TALITOS_RNGSR_HI_RD) == 0)
11286 +                       cpu_relax();
11287 +       /*
11288 +        * we tell the RNG to start filling the RNG FIFO
11289 +        * by writing the RNGDSR 
11290 +        */
11291 +       v = talitos_read(sc->sc_base_addr + TALITOS_RNGDSR_HI);
11292 +       talitos_write(sc->sc_base_addr + TALITOS_RNGDSR_HI, v);
11293 +       /*
11294 +        * 64 bits of data will be pushed onto the FIFO every 
11295 +        * 256 SEC cycles until the FIFO is full.  The RNG then 
11296 +        * attempts to keep the FIFO full.
11297 +        */
11298 +       v = talitos_read(sc->sc_base_addr + TALITOS_RNGISR_HI);
11299 +       if (v) {
11300 +               printk(KERN_ERR "%s: RNGISR_HI error %08x\n",
11301 +                       device_get_nameunit(sc->sc_cdev), v);
11302 +               return;
11303 +       }
11304 +       /*
11305 +        * n.b. we need to add a FIPS test here - if the RNG is going 
11306 +        * to fail, it's going to fail at reset time
11307 +        */
11308 +       return;
11309 +}
11310 +#endif /* CONFIG_OCF_RANDOMHARVEST */
11311 +
11312 +/*
11313 + * Generate a new software session.
11314 + */
11315 +static int
11316 +talitos_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
11317 +{
11318 +       struct cryptoini *c, *encini = NULL, *macini = NULL;
11319 +       struct talitos_softc *sc = device_get_softc(dev);
11320 +       struct talitos_session *ses = NULL;
11321 +       int sesn;
11322 +
11323 +       DPRINTF("%s()\n", __FUNCTION__);
11324 +       if (sidp == NULL || cri == NULL || sc == NULL) {
11325 +               DPRINTF("%s,%d - EINVAL\n", __FILE__, __LINE__);
11326 +               return EINVAL;
11327 +       }
11328 +       for (c = cri; c != NULL; c = c->cri_next) {
11329 +               if (c->cri_alg == CRYPTO_MD5 ||
11330 +                   c->cri_alg == CRYPTO_MD5_HMAC ||
11331 +                   c->cri_alg == CRYPTO_SHA1 ||
11332 +                   c->cri_alg == CRYPTO_SHA1_HMAC ||
11333 +                   c->cri_alg == CRYPTO_NULL_HMAC) {
11334 +                       if (macini)
11335 +                               return EINVAL;
11336 +                       macini = c;
11337 +               } else if (c->cri_alg == CRYPTO_DES_CBC ||
11338 +                   c->cri_alg == CRYPTO_3DES_CBC ||
11339 +                   c->cri_alg == CRYPTO_AES_CBC ||
11340 +                   c->cri_alg == CRYPTO_NULL_CBC) {
11341 +                       if (encini)
11342 +                               return EINVAL;
11343 +                       encini = c;
11344 +               } else {
11345 +                       DPRINTF("UNKNOWN c->cri_alg %d\n", c->cri_alg);
11346 +                       return EINVAL;
11347 +               }
11348 +       }
11349 +       if (encini == NULL && macini == NULL)
11350 +               return EINVAL;
11351 +       if (encini) {   
11352 +               /* validate key length */
11353 +               switch (encini->cri_alg) {
11354 +               case CRYPTO_DES_CBC:
11355 +                       if (encini->cri_klen != 64)
11356 +                               return EINVAL;
11357 +                       break;
11358 +               case CRYPTO_3DES_CBC:
11359 +                       if (encini->cri_klen != 192) {
11360 +                               return EINVAL;
11361 +                       }
11362 +                       break;
11363 +               case CRYPTO_AES_CBC:
11364 +                       if (encini->cri_klen != 128 &&
11365 +                           encini->cri_klen != 192 &&
11366 +                           encini->cri_klen != 256)
11367 +                               return EINVAL;
11368 +                       break;
11369 +               default:
11370 +                       DPRINTF("UNKNOWN encini->cri_alg %d\n", 
11371 +                               encini->cri_alg);
11372 +                       return EINVAL;
11373 +               }
11374 +       }
11375 +
11376 +       if (sc->sc_sessions == NULL) {
11377 +               ses = sc->sc_sessions = (struct talitos_session *)
11378 +                       kmalloc(sizeof(struct talitos_session), SLAB_ATOMIC);
11379 +               if (ses == NULL)
11380 +                       return ENOMEM;
11381 +               memset(ses, 0, sizeof(struct talitos_session));
11382 +               sesn = 0;
11383 +               sc->sc_nsessions = 1;
11384 +       } else {
11385 +               for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
11386 +                       if (sc->sc_sessions[sesn].ses_used == 0) {
11387 +                               ses = &sc->sc_sessions[sesn];
11388 +                               break;
11389 +                       }
11390 +               }
11391 +
11392 +               if (ses == NULL) {
11393 +                       /* allocating session */
11394 +                       sesn = sc->sc_nsessions;
11395 +                       ses = (struct talitos_session *) kmalloc(
11396 +                               (sesn + 1) * sizeof(struct talitos_session), 
11397 +                               SLAB_ATOMIC);
11398 +                       if (ses == NULL)
11399 +                               return ENOMEM;
11400 +                       memset(ses, 0,
11401 +                               (sesn + 1) * sizeof(struct talitos_session));
11402 +                       memcpy(ses, sc->sc_sessions, 
11403 +                               sesn * sizeof(struct talitos_session));
11404 +                       memset(sc->sc_sessions, 0,
11405 +                               sesn * sizeof(struct talitos_session));
11406 +                       kfree(sc->sc_sessions);
11407 +                       sc->sc_sessions = ses;
11408 +                       ses = &sc->sc_sessions[sesn];
11409 +                       sc->sc_nsessions++;
11410 +               }
11411 +       }
11412 +
11413 +       ses->ses_used = 1;
11414 +
11415 +       if (encini) {
11416 +               /* get an IV */
11417 +               /* XXX may read fewer than requested */
11418 +               read_random(ses->ses_iv, sizeof(ses->ses_iv));
11419 +
11420 +               ses->ses_klen = (encini->cri_klen + 7) / 8;
11421 +               memcpy(ses->ses_key, encini->cri_key, ses->ses_klen);
11422 +               if (macini) {
11423 +                       /* doing hash on top of cipher */
11424 +                       ses->ses_hmac_len = (macini->cri_klen + 7) / 8;
11425 +                       memcpy(ses->ses_hmac, macini->cri_key,
11426 +                               ses->ses_hmac_len);
11427 +               }
11428 +       } else if (macini) {
11429 +               /* doing hash */
11430 +               ses->ses_klen = (macini->cri_klen + 7) / 8;
11431 +               memcpy(ses->ses_key, macini->cri_key, ses->ses_klen);
11432 +       }
11433 +
11434 +       /* back compat way of determining MSC result len */
11435 +       if (macini) {
11436 +               ses->ses_mlen = macini->cri_mlen;
11437 +               if (ses->ses_mlen == 0) {
11438 +                       if (macini->cri_alg == CRYPTO_MD5_HMAC)
11439 +                               ses->ses_mlen = MD5_HASH_LEN;
11440 +                       else
11441 +                               ses->ses_mlen = SHA1_HASH_LEN;
11442 +               }
11443 +       }
11444 +
11445 +       /* really should make up a template td here, 
11446 +        * and only fill things like i/o and direction in process() */
11447 +
11448 +       /* assign session ID */
11449 +       *sidp = TALITOS_SID(sc->sc_num, sesn);
11450 +       return 0;
11451 +}
11452 +
11453 +/*
11454 + * Deallocate a session.
11455 + */
11456 +static int
11457 +talitos_freesession(device_t dev, u_int64_t tid)
11458 +{
11459 +       struct talitos_softc *sc = device_get_softc(dev);
11460 +       int session, ret;
11461 +       u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;
11462 +
11463 +       if (sc == NULL)
11464 +               return EINVAL;
11465 +       session = TALITOS_SESSION(sid);
11466 +       if (session < sc->sc_nsessions) {
11467 +               memset(&sc->sc_sessions[session], 0,
11468 +                       sizeof(sc->sc_sessions[session]));
11469 +               ret = 0;
11470 +       } else
11471 +               ret = EINVAL;
11472 +       return ret;
11473 +}
11474 +
11475 +/*
11476 + * launch device processing - it will come back with done notification 
11477 + * in the form of an interrupt and/or HDR_DONE_BITS in header 
11478 + */
11479 +static int 
11480 +talitos_submit(
11481 +       struct talitos_softc *sc,
11482 +       struct talitos_desc *td,
11483 +       int chsel)
11484 +{
11485 +       u_int32_t v;
11486 +
11487 +       v = dma_map_single(NULL, td, sizeof(*td), DMA_TO_DEVICE);
11488 +       talitos_write(sc->sc_base_addr + 
11489 +               chsel*TALITOS_CH_OFFSET + TALITOS_CH_FF, 0);
11490 +       talitos_write(sc->sc_base_addr + 
11491 +               chsel*TALITOS_CH_OFFSET + TALITOS_CH_FF_HI, v);
11492 +       return 0;
11493 +}
11494 +
11495 +static int
11496 +talitos_process(device_t dev, struct cryptop *crp, int hint)
11497 +{
11498 +       int i, err = 0, ivsize;
11499 +       struct talitos_softc *sc = device_get_softc(dev);
11500 +       struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
11501 +       caddr_t iv;
11502 +       struct talitos_session *ses;
11503 +       struct talitos_desc *td;
11504 +       unsigned long flags;
11505 +       /* descriptor mappings */
11506 +       int hmac_key, hmac_data, cipher_iv, cipher_key, 
11507 +               in_fifo, out_fifo, cipher_iv_out;
11508 +       static int chsel = -1;
11509 +
11510 +       DPRINTF("%s()\n", __FUNCTION__);
11511 +
11512 +       if (crp == NULL || crp->crp_callback == NULL || sc == NULL) {
11513 +               return EINVAL;
11514 +       }
11515 +       crp->crp_etype = 0;
11516 +       if (TALITOS_SESSION(crp->crp_sid) >= sc->sc_nsessions) {
11517 +               return EINVAL;
11518 +       }
11519 +
11520 +       ses = &sc->sc_sessions[TALITOS_SESSION(crp->crp_sid)];
11521 +
11522 +        /* enter the channel scheduler */ 
11523 +       spin_lock_irqsave(&sc->sc_chnfifolock[sc->sc_num_channels], flags);
11524 +
11525 +       /* reuse channel that already had/has requests for the required EU */
11526 +       for (i = 0; i < sc->sc_num_channels; i++) {
11527 +               if (sc->sc_chnlastalg[i] == crp->crp_desc->crd_alg)
11528 +                       break;
11529 +       }
11530 +       if (i == sc->sc_num_channels) {
11531 +               /*
11532 +                * haven't seen this algo in the last sc_num_channels requests
11533 +                * or more; use round robin in this case
11534 +                * nb: sc->sc_num_channels must be power of 2 
11535 +                */
11536 +               chsel = (chsel + 1) & (sc->sc_num_channels - 1);
11537 +       } else {
11538 +               /*
11539 +                * matches channel with same target execution unit; 
11540 +                * use same channel in this case
11541 +                */
11542 +               chsel = i;
11543 +       }
11544 +       sc->sc_chnlastalg[chsel] = crp->crp_desc->crd_alg;
11545 +
11546 +        /* release the channel scheduler lock */ 
11547 +       spin_unlock_irqrestore(&sc->sc_chnfifolock[sc->sc_num_channels], flags);
11548 +
11549 +       /* acquire the selected channel fifo lock */
11550 +       spin_lock_irqsave(&sc->sc_chnfifolock[chsel], flags);
11551 +
11552 +       /* find and reserve next available descriptor-cryptop pair */
11553 +       for (i = 0; i < sc->sc_chfifo_len; i++) {
11554 +               if (sc->sc_chnfifo[chsel][i].cf_desc.hdr == 0) {
11555 +                       /* 
11556 +                        * ensure correct descriptor formation by
11557 +                        * avoiding inadvertently setting "optional" entries
11558 +                        * e.g. not using "optional" dptr2 for MD/HMAC descs
11559 +                        */
11560 +                       memset(&sc->sc_chnfifo[chsel][i].cf_desc,
11561 +                               0, sizeof(*td));
11562 +                       /* reserve it with done notification request bit */
11563 +                       sc->sc_chnfifo[chsel][i].cf_desc.hdr |= 
11564 +                               TALITOS_DONE_NOTIFY;
11565 +                       break;
11566 +               }
11567 +       }
11568 +       spin_unlock_irqrestore(&sc->sc_chnfifolock[chsel], flags);
11569 +
11570 +       if (i == sc->sc_chfifo_len) {
11571 +               /* fifo full */
11572 +               err = ERESTART;
11573 +               goto errout;
11574 +       }
11575 +       
11576 +       td = &sc->sc_chnfifo[chsel][i].cf_desc;
11577 +       sc->sc_chnfifo[chsel][i].cf_crp = crp;
11578 +
11579 +       crd1 = crp->crp_desc;
11580 +       if (crd1 == NULL) {
11581 +               err = EINVAL;
11582 +               goto errout;
11583 +       }
11584 +       crd2 = crd1->crd_next;
11585 +       /* prevent compiler warning */
11586 +       hmac_key = 0;
11587 +       hmac_data = 0;
11588 +       if (crd2 == NULL) {
11589 +               td->hdr |= TD_TYPE_COMMON_NONSNOOP_NO_AFEU;
11590 +               /* assign descriptor dword ptr mappings for this desc. type */
11591 +               cipher_iv = 1;
11592 +               cipher_key = 2;
11593 +               in_fifo = 3;
11594 +               cipher_iv_out = 5;
11595 +               if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
11596 +                   crd1->crd_alg == CRYPTO_SHA1_HMAC ||
11597 +                   crd1->crd_alg == CRYPTO_SHA1 ||
11598 +                   crd1->crd_alg == CRYPTO_MD5) {
11599 +                       out_fifo = 5;
11600 +                       maccrd = crd1;
11601 +                       enccrd = NULL;
11602 +               } else if (crd1->crd_alg == CRYPTO_DES_CBC ||
11603 +                   crd1->crd_alg == CRYPTO_3DES_CBC ||
11604 +                   crd1->crd_alg == CRYPTO_AES_CBC ||
11605 +                   crd1->crd_alg == CRYPTO_ARC4) {
11606 +                       out_fifo = 4;
11607 +                       maccrd = NULL;
11608 +                       enccrd = crd1;
11609 +               } else {
11610 +                       DPRINTF("UNKNOWN crd1->crd_alg %d\n", crd1->crd_alg);
11611 +                       err = EINVAL;
11612 +                       goto errout;
11613 +               }
11614 +       } else {
11615 +               if (sc->sc_desc_types & TALITOS_HAS_DT_IPSEC_ESP) {
11616 +                       td->hdr |= TD_TYPE_IPSEC_ESP;
11617 +               } else {
11618 +                       DPRINTF("unimplemented: multiple descriptor ipsec\n");
11619 +                       err = EINVAL;
11620 +                       goto errout;
11621 +               }
11622 +               /* assign descriptor dword ptr mappings for this desc. type */
11623 +               hmac_key = 0;
11624 +               hmac_data = 1;
11625 +               cipher_iv = 2;
11626 +               cipher_key = 3;
11627 +               in_fifo = 4;
11628 +               out_fifo = 5;
11629 +               cipher_iv_out = 6;
11630 +               if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
11631 +                     crd1->crd_alg == CRYPTO_SHA1_HMAC ||
11632 +                     crd1->crd_alg == CRYPTO_MD5 ||
11633 +                     crd1->crd_alg == CRYPTO_SHA1) &&
11634 +                   (crd2->crd_alg == CRYPTO_DES_CBC ||
11635 +                    crd2->crd_alg == CRYPTO_3DES_CBC ||
11636 +                    crd2->crd_alg == CRYPTO_AES_CBC ||
11637 +                    crd2->crd_alg == CRYPTO_ARC4) &&
11638 +                   ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
11639 +                       maccrd = crd1;
11640 +                       enccrd = crd2;
11641 +               } else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
11642 +                    crd1->crd_alg == CRYPTO_ARC4 ||
11643 +                    crd1->crd_alg == CRYPTO_3DES_CBC ||
11644 +                    crd1->crd_alg == CRYPTO_AES_CBC) &&
11645 +                   (crd2->crd_alg == CRYPTO_MD5_HMAC ||
11646 +                     crd2->crd_alg == CRYPTO_SHA1_HMAC ||
11647 +                     crd2->crd_alg == CRYPTO_MD5 ||
11648 +                     crd2->crd_alg == CRYPTO_SHA1) &&
11649 +                   (crd1->crd_flags & CRD_F_ENCRYPT)) {
11650 +                       enccrd = crd1;
11651 +                       maccrd = crd2;
11652 +               } else {
11653 +                       /* We cannot order the SEC as requested */
11654 +                       printk("%s: cannot do the order\n",
11655 +                                       device_get_nameunit(sc->sc_cdev));
11656 +                       err = EINVAL;
11657 +                       goto errout;
11658 +               }
11659 +       }
11660 +       /* assign in_fifo and out_fifo based on input/output struct type */
11661 +       if (crp->crp_flags & CRYPTO_F_SKBUF) {
11662 +               /* using SKB buffers */
11663 +               struct sk_buff *skb = (struct sk_buff *)crp->crp_buf;
11664 +               if (skb_shinfo(skb)->nr_frags) {
11665 +                       printk("%s: skb frags unimplemented\n",
11666 +                                       device_get_nameunit(sc->sc_cdev));
11667 +                       err = EINVAL;
11668 +                       goto errout;
11669 +               }
11670 +               td->ptr[in_fifo].ptr = dma_map_single(NULL, skb->data, 
11671 +                       skb->len, DMA_TO_DEVICE);
11672 +               td->ptr[in_fifo].len = skb->len;
11673 +               td->ptr[out_fifo].ptr = dma_map_single(NULL, skb->data, 
11674 +                       skb->len, DMA_TO_DEVICE);
11675 +               td->ptr[out_fifo].len = skb->len;
11676 +               td->ptr[hmac_data].ptr = dma_map_single(NULL, skb->data,
11677 +                       skb->len, DMA_TO_DEVICE);
11678 +       } else if (crp->crp_flags & CRYPTO_F_IOV) {
11679 +               /* using IOV buffers */
11680 +               struct uio *uiop = (struct uio *)crp->crp_buf;
11681 +               if (uiop->uio_iovcnt > 1) {
11682 +                       printk("%s: iov frags unimplemented\n",
11683 +                                       device_get_nameunit(sc->sc_cdev));
11684 +                       err = EINVAL;
11685 +                       goto errout;
11686 +               }
11687 +               td->ptr[in_fifo].ptr = dma_map_single(NULL,
11688 +                       uiop->uio_iov->iov_base, crp->crp_ilen, DMA_TO_DEVICE);
11689 +               td->ptr[in_fifo].len = crp->crp_ilen;
11690 +               /* crp_olen is never set; always use crp_ilen */
11691 +               td->ptr[out_fifo].ptr = dma_map_single(NULL,
11692 +                       uiop->uio_iov->iov_base,
11693 +                       crp->crp_ilen, DMA_TO_DEVICE);
11694 +               td->ptr[out_fifo].len = crp->crp_ilen;
11695 +       } else {
11696 +               /* using contig buffers */
11697 +               td->ptr[in_fifo].ptr = dma_map_single(NULL,
11698 +                       crp->crp_buf, crp->crp_ilen, DMA_TO_DEVICE);
11699 +               td->ptr[in_fifo].len = crp->crp_ilen;
11700 +               td->ptr[out_fifo].ptr = dma_map_single(NULL,
11701 +                       crp->crp_buf, crp->crp_ilen, DMA_TO_DEVICE);
11702 +               td->ptr[out_fifo].len = crp->crp_ilen;
11703 +       }
11704 +       if (enccrd) {
11705 +               switch (enccrd->crd_alg) {
11706 +               case CRYPTO_3DES_CBC:
11707 +                       td->hdr |= TALITOS_MODE0_DEU_3DES;
11708 +                       /* FALLTHROUGH */
11709 +               case CRYPTO_DES_CBC:
11710 +                       td->hdr |= TALITOS_SEL0_DEU
11711 +                               |  TALITOS_MODE0_DEU_CBC;
11712 +                       if (enccrd->crd_flags & CRD_F_ENCRYPT)
11713 +                               td->hdr |= TALITOS_MODE0_DEU_ENC;
11714 +                       ivsize = 2*sizeof(u_int32_t);
11715 +                       DPRINTF("%cDES ses %d ch %d len %d\n",
11716 +                               (td->hdr & TALITOS_MODE0_DEU_3DES)?'3':'1',
11717 +                               (u32)TALITOS_SESSION(crp->crp_sid),
11718 +                               chsel, td->ptr[in_fifo].len);
11719 +                       break;
11720 +               case CRYPTO_AES_CBC:
11721 +                       td->hdr |= TALITOS_SEL0_AESU
11722 +                               |  TALITOS_MODE0_AESU_CBC;
11723 +                       if (enccrd->crd_flags & CRD_F_ENCRYPT)
11724 +                               td->hdr |= TALITOS_MODE0_AESU_ENC;
11725 +                       ivsize = 4*sizeof(u_int32_t);
11726 +                       DPRINTF("AES  ses %d ch %d len %d\n",
11727 +                               (u32)TALITOS_SESSION(crp->crp_sid),
11728 +                               chsel, td->ptr[in_fifo].len);
11729 +                       break;
11730 +               default:
11731 +                       printk("%s: unimplemented enccrd->crd_alg %d\n",
11732 +                                       device_get_nameunit(sc->sc_cdev), enccrd->crd_alg);
11733 +                       err = EINVAL;
11734 +                       goto errout;
11735 +               }
11736 +               /*
11737 +                * Setup encrypt/decrypt state.  When using basic ops
11738 +                * we can't use an inline IV because hash/crypt offset
11739 +                * must be from the end of the IV to the start of the
11740 +                * crypt data and this leaves out the preceding header
11741 +                * from the hash calculation.  Instead we place the IV
11742 +                * in the state record and set the hash/crypt offset to
11743 +                * copy both the header+IV.
11744 +                */
11745 +               if (enccrd->crd_flags & CRD_F_ENCRYPT) {
11746 +                       td->hdr |= TALITOS_DIR_OUTBOUND; 
11747 +                       if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
11748 +                               iv = enccrd->crd_iv;
11749 +                       else
11750 +                               iv = (caddr_t) ses->ses_iv;
11751 +                       if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) {
11752 +                               crypto_copyback(crp->crp_flags, crp->crp_buf,
11753 +                                   enccrd->crd_inject, ivsize, iv);
11754 +                       }
11755 +               } else {
11756 +                       td->hdr |= TALITOS_DIR_INBOUND; 
11757 +                       if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) {
11758 +                               iv = enccrd->crd_iv;
11759 +                               bcopy(enccrd->crd_iv, iv, ivsize);
11760 +                       } else {
11761 +                               iv = (caddr_t) ses->ses_iv;
11762 +                               crypto_copydata(crp->crp_flags, crp->crp_buf,
11763 +                                   enccrd->crd_inject, ivsize, iv);
11764 +                       }
11765 +               }
11766 +               td->ptr[cipher_iv].ptr = dma_map_single(NULL, iv, ivsize, 
11767 +                       DMA_TO_DEVICE);
11768 +               td->ptr[cipher_iv].len = ivsize;
11769 +               /*
11770 +                * we don't need the cipher iv out length/pointer
11771 +                * field to do ESP IPsec. Therefore we set the len field to 0,
11772 +                * which tells the SEC not to do anything with this len/ptr
11773 +                * field. Previously, when the length/pointer was pointing to
11774 +                * the iv, it caused packet corruption.
11775 +                */
11776 +               td->ptr[cipher_iv_out].len = 0;
11777 +       }
11778 +       if (enccrd && maccrd) {
11779 +               /* this is ipsec only for now */
11780 +               td->hdr |= TALITOS_SEL1_MDEU
11781 +                       |  TALITOS_MODE1_MDEU_INIT
11782 +                       |  TALITOS_MODE1_MDEU_PAD;
11783 +               switch (maccrd->crd_alg) {
11784 +                       case    CRYPTO_MD5:     
11785 +                               td->hdr |= TALITOS_MODE1_MDEU_MD5;
11786 +                               break;
11787 +                       case    CRYPTO_MD5_HMAC:        
11788 +                               td->hdr |= TALITOS_MODE1_MDEU_MD5_HMAC;
11789 +                               break;
11790 +                       case    CRYPTO_SHA1:    
11791 +                               td->hdr |= TALITOS_MODE1_MDEU_SHA1;
11792 +                               break;
11793 +                       case    CRYPTO_SHA1_HMAC:       
11794 +                               td->hdr |= TALITOS_MODE1_MDEU_SHA1_HMAC;
11795 +                               break;
11796 +                       default:
11797 +                               /* We cannot order the SEC as requested */
11798 +                               printk("%s: cannot do the order\n",
11799 +                                               device_get_nameunit(sc->sc_cdev));
11800 +                               err = EINVAL;
11801 +                               goto errout;
11802 +               }
11803 +               if ((maccrd->crd_alg == CRYPTO_MD5_HMAC) ||
11804 +                  (maccrd->crd_alg == CRYPTO_SHA1_HMAC)) {
11805 +                       /*
11806 +                        * The offset from hash data to the start of
11807 +                        * crypt data is the difference in the skips.
11808 +                        */
11809 +                       /* ipsec only for now */
11810 +                       td->ptr[hmac_key].ptr = dma_map_single(NULL, 
11811 +                               ses->ses_hmac, ses->ses_hmac_len, DMA_TO_DEVICE);
11812 +                       td->ptr[hmac_key].len = ses->ses_hmac_len;
11813 +                       td->ptr[in_fifo].ptr  += enccrd->crd_skip;
11814 +                       td->ptr[in_fifo].len  =  enccrd->crd_len;
11815 +                       td->ptr[out_fifo].ptr += enccrd->crd_skip;
11816 +                       td->ptr[out_fifo].len =  enccrd->crd_len;
11817 +                       /* bytes of HMAC to append to ciphertext */
11818 +                       td->ptr[out_fifo].extent =  ses->ses_mlen;
11819 +                       td->ptr[hmac_data].ptr += maccrd->crd_skip; 
11820 +                       td->ptr[hmac_data].len = enccrd->crd_skip - maccrd->crd_skip;
11821 +               }
11822 +               if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT) {
11823 +                       printk("%s: CRD_F_KEY_EXPLICIT unimplemented\n",
11824 +                                       device_get_nameunit(sc->sc_cdev));
11825 +               }
11826 +       }
11827 +       if (!enccrd && maccrd) {
11828 +               /* single MD5 or SHA */
11829 +               td->hdr |= TALITOS_SEL0_MDEU
11830 +                               |  TALITOS_MODE0_MDEU_INIT
11831 +                               |  TALITOS_MODE0_MDEU_PAD;
11832 +               switch (maccrd->crd_alg) {
11833 +                       case    CRYPTO_MD5:     
11834 +                               td->hdr |= TALITOS_MODE0_MDEU_MD5;
11835 +                               DPRINTF("MD5  ses %d ch %d len %d\n",
11836 +                                       (u32)TALITOS_SESSION(crp->crp_sid), 
11837 +                                       chsel, td->ptr[in_fifo].len);
11838 +                               break;
11839 +                       case    CRYPTO_MD5_HMAC:        
11840 +                               td->hdr |= TALITOS_MODE0_MDEU_MD5_HMAC;
11841 +                               break;
11842 +                       case    CRYPTO_SHA1:    
11843 +                               td->hdr |= TALITOS_MODE0_MDEU_SHA1;
11844 +                               DPRINTF("SHA1 ses %d ch %d len %d\n",
11845 +                                       (u32)TALITOS_SESSION(crp->crp_sid), 
11846 +                                       chsel, td->ptr[in_fifo].len);
11847 +                               break;
11848 +                       case    CRYPTO_SHA1_HMAC:       
11849 +                               td->hdr |= TALITOS_MODE0_MDEU_SHA1_HMAC;
11850 +                               break;
11851 +                       default:
11852 +                               /* We cannot order the SEC as requested */
11853 +                               DPRINTF("cannot do the order\n");
11854 +                               err = EINVAL;
11855 +                               goto errout;
11856 +               }
11857 +
11858 +               if (crp->crp_flags & CRYPTO_F_IOV)
11859 +                       td->ptr[out_fifo].ptr += maccrd->crd_inject;
11860 +
11861 +               if ((maccrd->crd_alg == CRYPTO_MD5_HMAC) ||
11862 +                  (maccrd->crd_alg == CRYPTO_SHA1_HMAC)) {
11863 +                       td->ptr[hmac_key].ptr = dma_map_single(NULL, 
11864 +                               ses->ses_hmac, ses->ses_hmac_len, 
11865 +                               DMA_TO_DEVICE);
11866 +                       td->ptr[hmac_key].len = ses->ses_hmac_len;
11867 +               }
11868 +       } 
11869 +       else {
11870 +               /* using process key (session data has duplicate) */
11871 +               td->ptr[cipher_key].ptr = dma_map_single(NULL, 
11872 +                       enccrd->crd_key, (enccrd->crd_klen + 7) / 8, 
11873 +                       DMA_TO_DEVICE);
11874 +               td->ptr[cipher_key].len = (enccrd->crd_klen + 7) / 8;
11875 +       }
11876 +       /* descriptor complete - GO! */
11877 +       return talitos_submit(sc, td, chsel);
11878 +
11879 +errout:
11880 +       if (err != ERESTART) {
11881 +               crp->crp_etype = err;
11882 +               crypto_done(crp);
11883 +       }
11884 +       return err;
11885 +}
11886 +
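Editor's note: the channel scheduler at the top of this routine combines two policies: reuse the channel that last ran the requested algorithm (so work for one execution unit stays on one channel), otherwise advance a round-robin cursor masked by the channel count. A minimal standalone sketch of that selection, with hypothetical names (pick_channel, last_alg, rr), assuming num_channels is a power of 2 as the driver requires:

    /* editorial sketch only, not part of the patch */
    static int rr;      /* round-robin cursor (hypothetical) */

    static int pick_channel(int *last_alg, int num_channels, int alg)
    {
            int i;

            for (i = 0; i < num_channels; i++)
                    if (last_alg[i] == alg)         /* channel last ran this alg */
                            return i;               /* reuse it */
            rr = (rr + 1) & (num_channels - 1);     /* round robin; needs 2^k channels */
            return rr;
    }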
11887 +/* go through all channels' descriptors, notifying OCF what has
11888 + * _and_hasn't_ successfully completed, and reset the device
11889 + * (otherwise it would be up to decoding the descriptor headers!)
11890 + */
11891 +static void talitos_errorprocessing(struct talitos_softc *sc)
11892 +{
11893 +       unsigned long flags;
11894 +       int i, j;
11895 +
11896 +       /* disable further scheduling until under control */
11897 +       spin_lock_irqsave(&sc->sc_chnfifolock[sc->sc_num_channels], flags);
11898 +
11899 +       if (debug) dump_talitos_status(sc);
11900 +       /* go through descriptors, try and salvage those successfully done, 
11901 +        * and EIO those that weren't
11902 +        */
11903 +       for (i = 0; i < sc->sc_num_channels; i++) {
11904 +               spin_lock_irqsave(&sc->sc_chnfifolock[i], flags);
11905 +               for (j = 0; j < sc->sc_chfifo_len; j++) {
11906 +                       if (sc->sc_chnfifo[i][j].cf_desc.hdr) {
11907 +                               if ((sc->sc_chnfifo[i][j].cf_desc.hdr 
11908 +                                       & TALITOS_HDR_DONE_BITS) 
11909 +                                       != TALITOS_HDR_DONE_BITS) {
11910 +                                       /* this one didn't finish */
11911 +                                       /* signify in crp->etype */
11912 +                                       sc->sc_chnfifo[i][j].cf_crp->crp_etype 
11913 +                                               = EIO;
11914 +                               }
11915 +                       } else
11916 +                               continue; /* free entry */
11917 +                       /* either way, notify ocf */
11918 +                       crypto_done(sc->sc_chnfifo[i][j].cf_crp);
11919 +                       /* and tag it available again
11920 +                        *
11921 +                        * memset to ensure correct descriptor formation by
11922 +                        * avoiding inadvertently setting "optional" entries
11923 +                        * e.g. not using "optional" dptr2 for MD/HMAC processing
11924 +                        */
11925 +                       memset(&sc->sc_chnfifo[i][j].cf_desc,
11926 +                               0, sizeof(struct talitos_desc));
11927 +               }
11928 +               spin_unlock_irqrestore(&sc->sc_chnfifolock[i], flags);
11929 +       }
11930 +       /* reset and initialize the SEC h/w device */
11931 +       talitos_reset_device(sc);
11932 +       talitos_init_device(sc);
11933 +#ifdef CONFIG_OCF_RANDOMHARVEST
11934 +       if (sc->sc_exec_units & TALITOS_HAS_EU_RNG)
11935 +               talitos_rng_init(sc);
11936 +#endif
11937 +
11938 +       /* Okay. Stand by. */
11939 +       spin_unlock_irqrestore(&sc->sc_chnfifolock[sc->sc_num_channels], flags);
11940 +
11941 +       return;
11942 +}
11943 +
11944 +/* go through all channels' descriptors, notifying OCF what's been done */
11945 +static void talitos_doneprocessing(struct talitos_softc *sc)
11946 +{
11947 +       unsigned long flags;
11948 +       int i, j;
11949 +
11950 +       /* go through descriptors looking for done bits */
11951 +       for (i = 0; i < sc->sc_num_channels; i++) {
11952 +               spin_lock_irqsave(&sc->sc_chnfifolock[i], flags);
11953 +               for (j = 0; j < sc->sc_chfifo_len; j++) {
11954 +                       /* descriptor has done bits set? */
11955 +                       if ((sc->sc_chnfifo[i][j].cf_desc.hdr 
11956 +                               & TALITOS_HDR_DONE_BITS) 
11957 +                               == TALITOS_HDR_DONE_BITS) {
11958 +                               /* notify ocf */
11959 +                               crypto_done(sc->sc_chnfifo[i][j].cf_crp);
11960 +                               /* and tag it available again
11961 +                                *
11962 +                                * memset to ensure correct descriptor formation by
11963 +                                * avoiding inadvertently setting "optional" entries
11964 +                                * e.g. not using "optional" dptr2 for MD/HMAC processing
11965 +                                */
11966 +                               memset(&sc->sc_chnfifo[i][j].cf_desc,
11967 +                                       0, sizeof(struct talitos_desc));
11968 +                       }
11969 +               }
11970 +               spin_unlock_irqrestore(&sc->sc_chnfifolock[i], flags);
11971 +       }
11972 +       return;
11973 +}
11974 +
11975 +static irqreturn_t
11976 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
11977 +talitos_intr(int irq, void *arg)
11978 +#else
11979 +talitos_intr(int irq, void *arg, struct pt_regs *regs)
11980 +#endif
11981 +{
11982 +       struct talitos_softc *sc = arg;
11983 +       u_int32_t v, v_hi;
11984 +       
11985 +       /* ack */
11986 +       v = talitos_read(sc->sc_base_addr + TALITOS_ISR);
11987 +       v_hi = talitos_read(sc->sc_base_addr + TALITOS_ISR_HI);
11988 +       talitos_write(sc->sc_base_addr + TALITOS_ICR, v);
11989 +       talitos_write(sc->sc_base_addr + TALITOS_ICR_HI, v_hi);
11990 +
11991 +       if (unlikely(v & TALITOS_ISR_ERROR)) {
11992 +               /* Okay, Houston, we've had a problem here. */
11993 +               printk(KERN_DEBUG "%s: got error interrupt - ISR 0x%08x_%08x\n",
11994 +                               device_get_nameunit(sc->sc_cdev), v, v_hi);
11995 +               talitos_errorprocessing(sc);
11996 +       } else
11997 +       if (likely(v & TALITOS_ISR_DONE)) {
11998 +               talitos_doneprocessing(sc);
11999 +       }
12000 +       return IRQ_HANDLED;
12001 +}
12002 +
12003 +/*
12004 + * Initialize registers we need to touch only once.
12005 + */
12006 +static void
12007 +talitos_init_device(struct talitos_softc *sc)
12008 +{
12009 +       u_int32_t v;
12010 +       int i;
12011 +
12012 +       DPRINTF("%s()\n", __FUNCTION__);
12013 +
12014 +       /* init all channels */
12015 +       for (i = 0; i < sc->sc_num_channels; i++) {
12016 +               v = talitos_read(sc->sc_base_addr + 
12017 +                       i*TALITOS_CH_OFFSET + TALITOS_CH_CCCR_HI);
12018 +               v |= TALITOS_CH_CCCR_HI_CDWE
12019 +                 |  TALITOS_CH_CCCR_HI_CDIE;  /* invoke interrupt if done */
12020 +               talitos_write(sc->sc_base_addr + 
12021 +                       i*TALITOS_CH_OFFSET + TALITOS_CH_CCCR_HI, v);
12022 +       }
12023 +       /* enable all interrupts */
12024 +       v = talitos_read(sc->sc_base_addr + TALITOS_IMR);
12025 +       v |= TALITOS_IMR_ALL;
12026 +       talitos_write(sc->sc_base_addr + TALITOS_IMR, v);
12027 +       v = talitos_read(sc->sc_base_addr + TALITOS_IMR_HI);
12028 +       v |= TALITOS_IMR_HI_ERRONLY;
12029 +       talitos_write(sc->sc_base_addr + TALITOS_IMR_HI, v);
12030 +       return;
12031 +}
12032 +
12033 +/*
12034 + * set the master reset bit on the device.
12035 + */
12036 +static void
12037 +talitos_reset_device_master(struct talitos_softc *sc)
12038 +{
12039 +       u_int32_t v;
12040 +
12041 +       /* Reset the device by writing 1 to MCR:SWR and waiting 'til cleared */
12042 +       v = talitos_read(sc->sc_base_addr + TALITOS_MCR);
12043 +       talitos_write(sc->sc_base_addr + TALITOS_MCR, v | TALITOS_MCR_SWR);
12044 +
12045 +       while (talitos_read(sc->sc_base_addr + TALITOS_MCR) & TALITOS_MCR_SWR)
12046 +               cpu_relax();
12047 +
12048 +       return;
12049 +}
12050 +
12051 +/*
12052 + * Resets the device.  Values in the registers are left as is
12053 + * from the reset (i.e. initial values are assigned elsewhere).
12054 + */
12055 +static void
12056 +talitos_reset_device(struct talitos_softc *sc)
12057 +{
12058 +       u_int32_t v;
12059 +       int i;
12060 +
12061 +       DPRINTF("%s()\n", __FUNCTION__);
12062 +
12063 +       /*
12064 +        * Master reset
12065 +        * errata documentation: warning: certain SEC interrupts
12066 +        * are not fully cleared by writing the MCR:SWR bit,
12067 +        * so set the bit twice to completely reset the device
12068 +        */
12069 +       talitos_reset_device_master(sc);        /* once */
12070 +       talitos_reset_device_master(sc);        /* and once again */
12071 +       
12072 +       /* reset all channels */
12073 +       for (i = 0; i < sc->sc_num_channels; i++) {
12074 +               v = talitos_read(sc->sc_base_addr + i*TALITOS_CH_OFFSET +
12075 +                       TALITOS_CH_CCCR);
12076 +               talitos_write(sc->sc_base_addr + i*TALITOS_CH_OFFSET +
12077 +                       TALITOS_CH_CCCR, v | TALITOS_CH_CCCR_RESET);
12078 +       }
12079 +}
12080 +
12081 +/* Set up the crypto device structure, private data,
12082 + * and anything else we need before we start */
12083 +#ifdef CONFIG_PPC_MERGE
12084 +static int talitos_probe(struct of_device *ofdev, const struct of_device_id *match)
12085 +#else
12086 +static int talitos_probe(struct platform_device *pdev)
12087 +#endif
12088 +{
12089 +       struct talitos_softc *sc = NULL;
12090 +       struct resource *r;
12091 +#ifdef CONFIG_PPC_MERGE
12092 +       struct device *device = &ofdev->dev;
12093 +       struct device_node *np = ofdev->node;
12094 +       const unsigned int *prop;
12095 +       int err;
12096 +       struct resource res;
12097 +#endif
12098 +       static int num_chips = 0;
12099 +       int rc;
12100 +       int i;
12101 +
12102 +       DPRINTF("%s()\n", __FUNCTION__);
12103 +
12104 +       sc = (struct talitos_softc *) kmalloc(sizeof(*sc), GFP_KERNEL);
12105 +       if (!sc)
12106 +               return -ENOMEM;
12107 +       memset(sc, 0, sizeof(*sc));
12108 +
12109 +       softc_device_init(sc, DRV_NAME, num_chips, talitos_methods);
12110 +
12111 +       sc->sc_irq = -1;
12112 +       sc->sc_cid = -1;
12113 +#ifndef CONFIG_PPC_MERGE
12114 +       sc->sc_dev = pdev;
12115 +#endif
12116 +       sc->sc_num = num_chips++;
12117 +
12118 +#ifdef CONFIG_PPC_MERGE
12119 +       dev_set_drvdata(device, sc);
12120 +#else
12121 +       platform_set_drvdata(sc->sc_dev, sc);
12122 +#endif
12123 +
12124 +       /* get the irq line */
12125 +#ifdef CONFIG_PPC_MERGE
12126 +       err = of_address_to_resource(np, 0, &res);
12127 +       if (err)
12128 +               return -EINVAL;
12129 +       r = &res;
12130 +
12131 +       sc->sc_irq = irq_of_parse_and_map(np, 0);
12132 +#else
12133 +       /* get a pointer to the register memory */
12134 +       r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
12135 +
12136 +       sc->sc_irq = platform_get_irq(pdev, 0);
12137 +#endif
12138 +       rc = request_irq(sc->sc_irq, talitos_intr, 0,
12139 +                       device_get_nameunit(sc->sc_cdev), sc);
12140 +       if (rc) {
12141 +               printk(KERN_ERR "%s: failed to hook irq %d\n", 
12142 +                               device_get_nameunit(sc->sc_cdev), sc->sc_irq);
12143 +               sc->sc_irq = -1;
12144 +               goto out;
12145 +       }
12146 +
12147 +       sc->sc_base_addr = (ocf_iomem_t) ioremap(r->start, (r->end - r->start + 1));
12148 +       if (!sc->sc_base_addr) {
12149 +               printk(KERN_ERR "%s: failed to ioremap\n",
12150 +                               device_get_nameunit(sc->sc_cdev));
12151 +               goto out;
12152 +       }
12153 +
12154 +       /* figure out our SEC's properties and capabilities */
12155 +       sc->sc_chiprev = (u64)talitos_read(sc->sc_base_addr + TALITOS_ID) << 32
12156 +                | talitos_read(sc->sc_base_addr + TALITOS_ID_HI);
12157 +       DPRINTF("sec id 0x%llx\n", sc->sc_chiprev);
12158 +
12159 +#ifdef CONFIG_PPC_MERGE
12160 +       /* get SEC properties from device tree, defaulting to SEC 2.0 */
12161 +
12162 +       prop = of_get_property(np, "num-channels", NULL);
12163 +       sc->sc_num_channels = prop ? *prop : TALITOS_NCHANNELS_SEC_2_0;
12164 +
12165 +       prop = of_get_property(np, "channel-fifo-len", NULL);
12166 +       sc->sc_chfifo_len = prop ? *prop : TALITOS_CHFIFOLEN_SEC_2_0;
12167 +
12168 +       prop = of_get_property(np, "exec-units-mask", NULL);
12169 +       sc->sc_exec_units = prop ? *prop : TALITOS_HAS_EUS_SEC_2_0;
12170 +
12171 +       prop = of_get_property(np, "descriptor-types-mask", NULL);
12172 +       sc->sc_desc_types = prop ? *prop : TALITOS_HAS_DESCTYPES_SEC_2_0;
12173 +#else
12174 +       /* this hardcoded block should go away with openfirmware flat device tree support */
12175 +       if (sc->sc_chiprev & TALITOS_ID_SEC_2_0) {
12176 +               sc->sc_num_channels = TALITOS_NCHANNELS_SEC_2_0;
12177 +               sc->sc_chfifo_len = TALITOS_CHFIFOLEN_SEC_2_0;
12178 +               sc->sc_exec_units = TALITOS_HAS_EUS_SEC_2_0;
12179 +               sc->sc_desc_types = TALITOS_HAS_DESCTYPES_SEC_2_0;
12180 +       } else {
12181 +               printk(KERN_ERR "%s: failed to id device\n",
12182 +                               device_get_nameunit(sc->sc_cdev));
12183 +               goto out;
12184 +       }
12185 +#endif
12186 +
12187 +       /* + 1 is for the meta-channel lock used by the channel scheduler */
12188 +       sc->sc_chnfifolock = (spinlock_t *) kmalloc(
12189 +               (sc->sc_num_channels + 1) * sizeof(spinlock_t), GFP_KERNEL);
12190 +       if (!sc->sc_chnfifolock)
12191 +               goto out;
12192 +       for (i = 0; i < sc->sc_num_channels + 1; i++) {
12193 +               spin_lock_init(&sc->sc_chnfifolock[i]);
12194 +       }
12195 +
12196 +       sc->sc_chnlastalg = (int *) kmalloc(
12197 +               sc->sc_num_channels * sizeof(int), GFP_KERNEL);
12198 +       if (!sc->sc_chnlastalg)
12199 +               goto out;
12200 +       memset(sc->sc_chnlastalg, 0, sc->sc_num_channels * sizeof(int));
12201 +
12202 +       sc->sc_chnfifo = (struct desc_cryptop_pair **) kmalloc(
12203 +               sc->sc_num_channels * sizeof(struct desc_cryptop_pair *), 
12204 +               GFP_KERNEL);
12205 +       if (!sc->sc_chnfifo)
12206 +               goto out;
12207 +       for (i = 0; i < sc->sc_num_channels; i++) {
12208 +               sc->sc_chnfifo[i] = (struct desc_cryptop_pair *) kmalloc(
12209 +                       sc->sc_chfifo_len * sizeof(struct desc_cryptop_pair), 
12210 +                       GFP_KERNEL);
12211 +               if (!sc->sc_chnfifo[i])
12212 +                       goto out;
12213 +               memset(sc->sc_chnfifo[i], 0, 
12214 +                       sc->sc_chfifo_len * sizeof(struct desc_cryptop_pair));
12215 +       }
12216 +
12217 +       /* reset and initialize the SEC h/w device */
12218 +       talitos_reset_device(sc);
12219 +       talitos_init_device(sc);
12220 +
12221 +       sc->sc_cid = crypto_get_driverid(softc_get_device(sc),CRYPTOCAP_F_HARDWARE);
12222 +       if (sc->sc_cid < 0) {
12223 +               printk(KERN_ERR "%s: could not get crypto driver id\n",
12224 +                               device_get_nameunit(sc->sc_cdev));
12225 +               goto out;
12226 +       }
12227 +
12228 +       /* register algorithms with the framework */
12229 +       printk("%s:", device_get_nameunit(sc->sc_cdev));
12230 +
12231 +       if (sc->sc_exec_units & TALITOS_HAS_EU_RNG)  {
12232 +               printk(" rng");
12233 +#ifdef CONFIG_OCF_RANDOMHARVEST
12234 +               talitos_rng_init(sc);
12235 +               crypto_rregister(sc->sc_cid, talitos_read_random, sc);
12236 +#endif
12237 +       }
12238 +       if (sc->sc_exec_units & TALITOS_HAS_EU_DEU) {
12239 +               printk(" des/3des");
12240 +               crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
12241 +               crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
12242 +       }
12243 +       if (sc->sc_exec_units & TALITOS_HAS_EU_AESU) {
12244 +               printk(" aes");
12245 +               crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
12246 +       }
12247 +       if (sc->sc_exec_units & TALITOS_HAS_EU_MDEU) {
12248 +               printk(" md5");
12249 +               crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0);
12250 +               /* HMAC support only with IPsec for now */
12251 +               crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
12252 +               printk(" sha1");
12253 +               crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0);
12254 +               /* HMAC support only with IPsec for now */
12255 +               crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
12256 +       }
12257 +       printk("\n");
12258 +       return 0;
12259 +
12260 +out:
12261 +#ifndef CONFIG_PPC_MERGE
12262 +       talitos_remove(pdev);
12263 +#endif
12264 +       return -ENOMEM;
12265 +}
12266 +
12267 +#ifdef CONFIG_PPC_MERGE
12268 +static int talitos_remove(struct of_device *ofdev)
12269 +#else
12270 +static int talitos_remove(struct platform_device *pdev)
12271 +#endif
12272 +{
12273 +#ifdef CONFIG_PPC_MERGE
12274 +       struct talitos_softc *sc = dev_get_drvdata(&ofdev->dev);
12275 +#else
12276 +       struct talitos_softc *sc = platform_get_drvdata(pdev);
12277 +#endif
12278 +       int i;
12279 +
12280 +       DPRINTF("%s()\n", __FUNCTION__);
12281 +       if (sc->sc_cid >= 0)
12282 +               crypto_unregister_all(sc->sc_cid);
12283 +       if (sc->sc_chnfifo) {
12284 +               for (i = 0; i < sc->sc_num_channels; i++)
12285 +                       if (sc->sc_chnfifo[i])
12286 +                               kfree(sc->sc_chnfifo[i]);
12287 +               kfree(sc->sc_chnfifo);
12288 +       }
12289 +       if (sc->sc_chnlastalg)
12290 +               kfree(sc->sc_chnlastalg);
12291 +       if (sc->sc_chnfifolock)
12292 +               kfree(sc->sc_chnfifolock);
12293 +       if (sc->sc_irq != -1)
12294 +               free_irq(sc->sc_irq, sc);
12295 +       if (sc->sc_base_addr)
12296 +               iounmap((void *) sc->sc_base_addr);
12297 +       kfree(sc);
12298 +       return 0;
12299 +}
12300 +
12301 +#ifdef CONFIG_PPC_MERGE
12302 +static struct of_device_id talitos_match[] = {
12303 +       {
12304 +               .type = "crypto",
12305 +               .compatible = "talitos",
12306 +       },
12307 +       {},
12308 +};
12309 +
12310 +MODULE_DEVICE_TABLE(of, talitos_match);
12311 +
12312 +static struct of_platform_driver talitos_driver = {
12313 +       .name           = DRV_NAME,
12314 +       .match_table    = talitos_match,
12315 +       .probe          = talitos_probe,
12316 +       .remove         = talitos_remove,
12317 +};
12318 +
12319 +static int __init talitos_init(void)
12320 +{
12321 +       return of_register_platform_driver(&talitos_driver);
12322 +}
12323 +
12324 +static void __exit talitos_exit(void)
12325 +{
12326 +       of_unregister_platform_driver(&talitos_driver);
12327 +}
12328 +#else
12329 +/* Structure for a platform device driver */
12330 +static struct platform_driver talitos_driver = {
12331 +       .probe = talitos_probe,
12332 +       .remove = talitos_remove,
12333 +       .driver = {
12334 +               .name = "fsl-sec2",
12335 +       }
12336 +};
12337 +
12338 +static int __init talitos_init(void)
12339 +{
12340 +       return platform_driver_register(&talitos_driver);
12341 +}
12342 +
12343 +static void __exit talitos_exit(void)
12344 +{
12345 +       platform_driver_unregister(&talitos_driver);
12346 +}
12347 +#endif
12348 +
12349 +module_init(talitos_init);
12350 +module_exit(talitos_exit);
12351 +
12352 +MODULE_LICENSE("Dual BSD/GPL");
12353 +MODULE_AUTHOR("kim.phillips@freescale.com");
12354 +MODULE_DESCRIPTION("OCF driver for Freescale SEC (talitos)");
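Editor's note: on kernels built without CONFIG_PPC_MERGE, talitos_probe() above obtains its register window and interrupt from a platform device named "fsl-sec2" (one IORESOURCE_MEM plus one IORESOURCE_IRQ, see platform_get_resource()/platform_get_irq()). A hedged sketch of the board-code registration such a setup assumes; the base address and IRQ number are placeholders, not values from this patch:

    /* editorial sketch only, hypothetical board code, not part of the patch */
    #include <linux/ioport.h>
    #include <linux/platform_device.h>

    static struct resource sec2_resources[] = {
            { .start = 0xe0030000, .end = 0xe004ffff, .flags = IORESOURCE_MEM },
            { .start = 45,         .end = 45,         .flags = IORESOURCE_IRQ },
    };

    static struct platform_device sec2_device = {
            .name          = "fsl-sec2",    /* must match talitos_driver.driver.name */
            .id            = 0,
            .resource      = sec2_resources,
            .num_resources = ARRAY_SIZE(sec2_resources),
    };

    /* board init code would then call platform_device_register(&sec2_device); */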
12355 --- /dev/null   2007-07-04 13:54:27.000000000 +1000
12356 +++ linux/crypto/ocf/talitos/talitos_soft.h     2007-07-20 11:47:16.000000000 +1000
12357 @@ -0,0 +1,77 @@
12358 +/*
12359 + * Freescale SEC data structures for integration with ocf-linux
12360 + *
12361 + * Copyright (c) 2006 Freescale Semiconductor, Inc.
12362 + *
12363 + * Redistribution and use in source and binary forms, with or without
12364 + * modification, are permitted provided that the following conditions
12365 + * are met:
12366 + *
12367 + * 1. Redistributions of source code must retain the above copyright
12368 + *    notice, this list of conditions and the following disclaimer.
12369 + * 2. Redistributions in binary form must reproduce the above copyright
12370 + *    notice, this list of conditions and the following disclaimer in the
12371 + *    documentation and/or other materials provided with the distribution.
12372 + * 3. The name of the author may not be used to endorse or promote products
12373 + *    derived from this software without specific prior written permission.
12374 + *
12375 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
12376 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
12377 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
12378 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
12379 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
12380 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
12381 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
12382 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
12383 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
12384 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
12385 + */
12386 +
12387 +/*
12388 + * paired descriptor and associated crypto operation
12389 + */
12390 +struct desc_cryptop_pair {
12391 +       struct talitos_desc     cf_desc;        /* descriptor ptr */
12392 +       struct cryptop          *cf_crp;        /* cryptop ptr */
12393 +};
12394 +
12395 +/*
12396 + * Holds data specific to a single talitos device.
12397 + */
12398 +struct talitos_softc {
12399 +       softc_device_decl       sc_cdev;
12400 +       struct platform_device  *sc_dev;        /* device backpointer */
12401 +       ocf_iomem_t             sc_base_addr;
12402 +       int                     sc_irq;
12403 +       int                     sc_num;         /* if we have multiple chips */
12404 +       int32_t                 sc_cid;         /* crypto tag */
12405 +       u64                     sc_chiprev;     /* major/minor chip revision */
12406 +       int                     sc_nsessions;
12407 +       struct talitos_session  *sc_sessions;
12408 +       int                     sc_num_channels;/* number of crypto channels */
12409 +       int                     sc_chfifo_len;  /* channel fetch fifo len */
12410 +       int                     sc_exec_units;  /* execution units mask */
12411 +       int                     sc_desc_types;  /* descriptor types mask */
12412 +       /*
12413 +        * mutual exclusion for intra-channel resources, e.g. fetch fifos
12414 +        * the last entry is a meta-channel lock used by the channel scheduler
12415 +        */
12416 +       spinlock_t              *sc_chnfifolock;
12417 +       /* sc_chnlastalg contains the last algorithm used on each channel */
12418 +       int                     *sc_chnlastalg;
12419 +       /* sc_chnfifo holds pending descriptor--crypto operation pairs */
12420 +       struct desc_cryptop_pair        **sc_chnfifo;
12421 +};
12422 +
12423 +struct talitos_session {
12424 +       u_int32_t       ses_used;
12425 +       u_int32_t       ses_klen;               /* key length in bits */
12426 +       u_int32_t       ses_key[8];             /* DES/3DES/AES key */
12427 +       u_int32_t       ses_hmac[5];            /* hmac inner state */
12428 +       u_int32_t       ses_hmac_len;           /* hmac length */
12429 +       u_int32_t       ses_iv[4];              /* DES/3DES/AES iv */
12430 +       u_int32_t       ses_mlen;               /* desired hash result len (12=ipsec or 16) */
12431 +};
12432 +
12433 +#define        TALITOS_SESSION(sid)    ((sid) & 0x0fffffff)
12434 +#define        TALITOS_SID(crd, sesn)  (((crd) << 28) | ((sesn) & 0x0fffffff))
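Editor's note: the two macros above pack and unpack the OCF session id: the low 28 bits index sc_sessions[], the upper bits carry the card number, and the request-processing path above recovers the session with TALITOS_SESSION(crp->crp_sid). A small sketch assuming only the definitions above (sid_roundtrip_ok is a hypothetical helper):

    /* editorial sketch only, not part of the patch */
    static int sid_roundtrip_ok(u_int32_t crd, u_int32_t sesn)
    {
            u_int64_t sid = TALITOS_SID(crd, sesn);              /* pack card + session */
            return TALITOS_SESSION(sid) == (sesn & 0x0fffffff);  /* unpack low 28 bits */
    }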
12435 --- /dev/null   2007-07-04 13:54:27.000000000 +1000
12436 +++ linux/crypto/ocf/talitos/talitos_dev.h      2007-11-23 07:31:44.000000000 +1000
12437 @@ -0,0 +1,277 @@
12438 +/*
12439 + * Freescale SEC (talitos) device dependent data structures
12440 + *
12441 + * Copyright (c) 2006 Freescale Semiconductor, Inc.
12442 + *
12443 + * Redistribution and use in source and binary forms, with or without
12444 + * modification, are permitted provided that the following conditions
12445 + * are met:
12446 + *
12447 + * 1. Redistributions of source code must retain the above copyright
12448 + *    notice, this list of conditions and the following disclaimer.
12449 + * 2. Redistributions in binary form must reproduce the above copyright
12450 + *    notice, this list of conditions and the following disclaimer in the
12451 + *    documentation and/or other materials provided with the distribution.
12452 + * 3. The name of the author may not be used to endorse or promote products
12453 + *    derived from this software without specific prior written permission.
12454 + *
12455 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
12456 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
12457 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
12458 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
12459 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
12460 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
12461 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
12462 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
12463 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
12464 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
12465 + *
12466 + */
12467 +
12468 +/* device ID register values */
12469 +#define TALITOS_ID_SEC_2_0     0x40
12470 +#define TALITOS_ID_SEC_2_1     0x40 /* cross ref with IP block revision reg */
12471 +
12472 +/*
12473 + * the following num_channels, channel-fifo-depth, exec-unit-mask, and
12474 + * descriptor-types-mask values are for forward compatibility with
12475 + * openfirmware flat device trees
12476 + */
12477 +
12478 +/*
12479 + *  num_channels : the number of channels available in each SEC version.
12480 + */
12481 +
12482 +/* n.b. this driver requires these values be a power of 2 */
12483 +#define TALITOS_NCHANNELS_SEC_1_0      4
12484 +#define TALITOS_NCHANNELS_SEC_1_2      1
12485 +#define TALITOS_NCHANNELS_SEC_2_0      4
12486 +#define TALITOS_NCHANNELS_SEC_2_01     4
12487 +#define TALITOS_NCHANNELS_SEC_2_1      4
12488 +#define TALITOS_NCHANNELS_SEC_2_4      4
12489 +
12490 +/*
12491 + *  channel-fifo-depth : The number of descriptor
12492 + *  pointers a channel fetch fifo can hold.
12493 + */
12494 +#define TALITOS_CHFIFOLEN_SEC_1_0      1
12495 +#define TALITOS_CHFIFOLEN_SEC_1_2      1
12496 +#define TALITOS_CHFIFOLEN_SEC_2_0      24
12497 +#define TALITOS_CHFIFOLEN_SEC_2_01     24
12498 +#define TALITOS_CHFIFOLEN_SEC_2_1      24
12499 +#define TALITOS_CHFIFOLEN_SEC_2_4      24
12500 +
12501 +/* 
12502 + *  exec-unit-mask : The bitmask representing what Execution Units (EUs)
12503 + *  are available. EU information should be encoded following the SEC's 
12504 + *  EU_SEL0 bitfield documentation, i.e. as follows:
12505 + * 
12506 + *    bit 31 = set if SEC permits no-EU selection (should be always set)
12507 + *    bit 30 = set if SEC has the ARC4 EU (AFEU)
12508 + *    bit 29 = set if SEC has the des/3des EU (DEU)
12509 + *    bit 28 = set if SEC has the message digest EU (MDEU)
12510 + *    bit 27 = set if SEC has the random number generator EU (RNG)
12511 + *    bit 26 = set if SEC has the public key EU (PKEU)
12512 + *    bit 25 = set if SEC has the aes EU (AESU)
12513 + *    bit 24 = set if SEC has the Kasumi EU (KEU)
12514 + * 
12515 + */
12516 +#define TALITOS_HAS_EU_NONE            (1<<0)
12517 +#define TALITOS_HAS_EU_AFEU            (1<<1)
12518 +#define TALITOS_HAS_EU_DEU             (1<<2)
12519 +#define TALITOS_HAS_EU_MDEU            (1<<3)
12520 +#define TALITOS_HAS_EU_RNG             (1<<4)
12521 +#define TALITOS_HAS_EU_PKEU            (1<<5)
12522 +#define TALITOS_HAS_EU_AESU            (1<<6)
12523 +#define TALITOS_HAS_EU_KEU             (1<<7)
12524 +
12525 +/* the corresponding masks for each SEC version */
12526 +#define TALITOS_HAS_EUS_SEC_1_0                0x7f
12527 +#define TALITOS_HAS_EUS_SEC_1_2                0x4d
12528 +#define TALITOS_HAS_EUS_SEC_2_0                0x7f
12529 +#define TALITOS_HAS_EUS_SEC_2_01       0x7f
12530 +#define TALITOS_HAS_EUS_SEC_2_1                0xff
12531 +#define TALITOS_HAS_EUS_SEC_2_4                0x7f
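Editor's note: decoded against the TALITOS_HAS_EU_* bits just above, the per-version masks read as follows:

    /* 0x7f = NONE | AFEU | DEU | MDEU | RNG | PKEU | AESU  (bits 0-6)
     * 0xff = the same plus KEU (bit 7), i.e. the SEC 2.1 mask
     * 0x4d = NONE | DEU | MDEU | AESU, the reduced SEC 1.2 set
     */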
12532 +
12533 +/*
12534 + *  descriptor-types-mask : The bitmask representing what descriptors
12535 + *  are available. Descriptor type information should be encoded 
12536 + *  following the SEC's Descriptor Header Dword DESC_TYPE field 
12537 + *  documentation, i.e. as follows:
12538 + *
12539 + *    bit 0  = set if SEC supports the aesu_ctr_nonsnoop desc. type
12540 + *    bit 1  = set if SEC supports the ipsec_esp descriptor type
12541 + *    bit 2  = set if SEC supports the common_nonsnoop desc. type
12542 + *    bit 3  = set if SEC supports the 802.11i AES ccmp desc. type
12543 + *    bit 4  = set if SEC supports the hmac_snoop_no_afeu desc. type
12544 + *    bit 5  = set if SEC supports the srtp descriptor type
12545 + *    bit 6  = set if SEC supports the non_hmac_snoop_no_afeu desc.type
12546 + *    bit 7  = set if SEC supports the pkeu_assemble descriptor type
12547 + *    bit 8  = set if SEC supports the aesu_key_expand_output desc.type
12548 + *    bit 9  = set if SEC supports the pkeu_ptmul descriptor type
12549 + *    bit 10 = set if SEC supports the common_nonsnoop_afeu desc. type
12550 + *    bit 11 = set if SEC supports the pkeu_ptadd_dbl descriptor type
12551 + *
12552 + *  ..and so on and so forth.
12553 + */
12554 +#define TALITOS_HAS_DT_AESU_CTR_NONSNOOP       (1<<0)
12555 +#define TALITOS_HAS_DT_IPSEC_ESP               (1<<1)
12556 +#define TALITOS_HAS_DT_COMMON_NONSNOOP         (1<<2)
12557 +
12558 +/* the corresponding masks for each SEC version */
12559 +#define TALITOS_HAS_DESCTYPES_SEC_2_0  0x01010ebf
12560 +#define TALITOS_HAS_DESCTYPES_SEC_2_1  0x012b0ebf
12561 +
12562 +/* 
12563 + * a TALITOS_xxx_HI address points to the low data bits (32-63) of the register
12564 + */
12565 +
12566 +/* global register offset addresses */
12567 +#define TALITOS_ID             0x1020
12568 +#define TALITOS_ID_HI          0x1024
12569 +#define TALITOS_MCR            0x1030          /* master control register */
12570 +#define TALITOS_MCR_HI         0x1038          /* master control register */
12571 +#define TALITOS_MCR_SWR                0x1
12572 +#define TALITOS_IMR            0x1008          /* interrupt mask register */
12573 +#define TALITOS_IMR_ALL                0x00010fff      /* enable all interrupts mask */
12574 +#define TALITOS_IMR_ERRONLY    0x00010aaa      /* enable error interrupts */
12575 +#define TALITOS_IMR_HI         0x100C          /* interrupt mask register */
12576 +#define TALITOS_IMR_HI_ALL     0x00323333      /* enable all interrupts mask */
12577 +#define TALITOS_IMR_HI_ERRONLY 0x00222222      /* enable error interrupts */
12578 +#define TALITOS_ISR            0x1010          /* interrupt status register */
12579 +#define TALITOS_ISR_ERROR      0x00010faa      /* errors mask */
12580 +#define TALITOS_ISR_DONE       0x00000055      /* channel(s) done mask */
12581 +#define TALITOS_ISR_HI         0x1014          /* interrupt status register */
12582 +#define TALITOS_ICR            0x1018          /* interrupt clear register */
12583 +#define TALITOS_ICR_HI         0x101C          /* interrupt clear register */
12584 +
12585 +/* channel register address stride */
12586 +#define TALITOS_CH_OFFSET      0x100
12587 +
12588 +/* channel register offset addresses and bits */
12589 +#define TALITOS_CH_CCCR                0x1108  /* Crypto-Channel Config Register */
12590 +#define TALITOS_CH_CCCR_RESET  0x1     /* Channel Reset bit */
12591 +#define TALITOS_CH_CCCR_HI     0x110c  /* Crypto-Channel Config Register */
12592 +#define TALITOS_CH_CCCR_HI_CDWE        0x10    /* Channel done writeback enable bit */
12593 +#define TALITOS_CH_CCCR_HI_NT  0x4     /* Notification type bit */
12594 +#define TALITOS_CH_CCCR_HI_CDIE        0x2     /* Channel Done Interrupt Enable bit */
12595 +#define TALITOS_CH_CCPSR       0x1110  /* Crypto-Channel Pointer Status Reg */
12596 +#define TALITOS_CH_CCPSR_HI    0x1114  /* Crypto-Channel Pointer Status Reg */
12597 +#define TALITOS_CH_FF          0x1148  /* Fetch FIFO */
12598 +#define TALITOS_CH_FF_HI       0x114c  /* Fetch FIFO's FETCH_ADRS */
12599 +#define TALITOS_CH_CDPR                0x1140  /* Crypto-Channel Current Descriptor Ptr Reg */
12600 +#define TALITOS_CH_CDPR_HI     0x1144  /* Crypto-Channel Current Descriptor Ptr Reg */
12601 +#define TALITOS_CH_DESCBUF     0x1180  /* (thru 11bf) Crypto-Channel 
12602 +                                        * Descriptor Buffer (debug) */
12603 +
12604 +/* execution unit register offset addresses and bits */
12605 +#define TALITOS_DEUSR          0x2028  /* DEU status register */
12606 +#define TALITOS_DEUSR_HI       0x202c  /* DEU status register */
12607 +#define TALITOS_DEUISR         0x2030  /* DEU interrupt status register */
12608 +#define TALITOS_DEUISR_HI      0x2034  /* DEU interrupt status register */
12609 +#define TALITOS_DEUICR         0x2038  /* DEU interrupt control register */
12610 +#define TALITOS_DEUICR_HI      0x203c  /* DEU interrupt control register */
12611 +#define TALITOS_AESUISR                0x4030  /* AESU interrupt status register */
12612 +#define TALITOS_AESUISR_HI     0x4034  /* AESU interrupt status register */
12613 +#define TALITOS_AESUICR                0x4038  /* AESU interrupt control register */
12614 +#define TALITOS_AESUICR_HI     0x403c  /* AESU interrupt control register */
12615 +#define TALITOS_MDEUISR                0x6030  /* MDEU interrupt status register */
12616 +#define TALITOS_MDEUISR_HI     0x6034  /* MDEU interrupt status register */
12617 +#define TALITOS_RNGSR          0xa028  /* RNG status register */
12618 +#define TALITOS_RNGSR_HI       0xa02c  /* RNG status register */
12619 +#define TALITOS_RNGSR_HI_RD    0x1     /* RNG Reset done */
12620 +#define TALITOS_RNGSR_HI_OFL   0xff0000/* number of dwords in RNG output FIFO*/
12621 +#define TALITOS_RNGDSR         0xa010  /* RNG data size register */
12622 +#define TALITOS_RNGDSR_HI      0xa014  /* RNG data size register */
12623 +#define TALITOS_RNG_FIFO       0xa800  /* RNG FIFO - pool of random numbers */
12624 +#define TALITOS_RNGISR         0xa030  /* RNG Interrupt status register */
12625 +#define TALITOS_RNGISR_HI      0xa034  /* RNG Interrupt status register */
12626 +#define TALITOS_RNGRCR         0xa018  /* RNG Reset control register */
12627 +#define TALITOS_RNGRCR_HI      0xa01c  /* RNG Reset control register */
12628 +#define TALITOS_RNGRCR_HI_SR   0x1     /* RNG RNGRCR:Software Reset */
12629 +
12630 +/* descriptor pointer entry */
12631 +struct talitos_desc_ptr {
12632 +       u16     len;            /* length */
12633 +       u8      extent;         /* jump (to s/g link table) and extent */
12634 +       u8      res;            /* reserved */
12635 +       u32     ptr;            /* pointer */
12636 +};
12637 +
12638 +/* descriptor */
12639 +struct talitos_desc {
12640 +       u32     hdr;                            /* header */
12641 +       u32     res;                            /* reserved */
12642 +       struct talitos_desc_ptr         ptr[7]; /* ptr/len pair array */
12643 +};
12644 +
12645 +/* talitos descriptor header (hdr) bits */
12646 +
12647 +/* primary execution unit select */
12648 +#define        TALITOS_SEL0_AFEU       0x10000000
12649 +#define        TALITOS_SEL0_DEU        0x20000000
12650 +#define        TALITOS_SEL0_MDEU       0x30000000
12651 +#define        TALITOS_SEL0_RNG        0x40000000
12652 +#define        TALITOS_SEL0_PKEU       0x50000000
12653 +#define        TALITOS_SEL0_AESU       0x60000000
12654 +
12655 +/* primary execution unit mode (MODE0) and derivatives */
12656 +#define        TALITOS_MODE0_AESU_CBC          0x00200000
12657 +#define        TALITOS_MODE0_AESU_ENC          0x00100000
12658 +#define        TALITOS_MODE0_DEU_CBC           0x00400000
12659 +#define        TALITOS_MODE0_DEU_3DES          0x00200000
12660 +#define        TALITOS_MODE0_DEU_ENC           0x00100000
12661 +#define        TALITOS_MODE0_MDEU_INIT         0x01000000      /* init starting regs */
12662 +#define        TALITOS_MODE0_MDEU_HMAC         0x00800000
12663 +#define        TALITOS_MODE0_MDEU_PAD          0x00400000      /* PD */
12664 +#define        TALITOS_MODE0_MDEU_MD5          0x00200000
12665 +#define        TALITOS_MODE0_MDEU_SHA256       0x00100000
12666 +#define        TALITOS_MODE0_MDEU_SHA1         0x00000000      /* SHA-160 */
12667 +#define        TALITOS_MODE0_MDEU_MD5_HMAC     \
12668 +               (TALITOS_MODE0_MDEU_MD5 | TALITOS_MODE0_MDEU_HMAC)
12669 +#define        TALITOS_MODE0_MDEU_SHA256_HMAC  \
12670 +               (TALITOS_MODE0_MDEU_SHA256 | TALITOS_MODE0_MDEU_HMAC)
12671 +#define        TALITOS_MODE0_MDEU_SHA1_HMAC    \
12672 +               (TALITOS_MODE0_MDEU_SHA1 | TALITOS_MODE0_MDEU_HMAC)
12673 +
12674 +/* secondary execution unit select (SEL1) */
12675 +/* it's MDEU or nothing */
12676 +#define        TALITOS_SEL1_MDEU       0x00030000
12677 +
12678 +/* secondary execution unit mode (MODE1) and derivatives */
12679 +#define        TALITOS_MODE1_MDEU_INIT         0x00001000      /* init starting regs */
12680 +#define        TALITOS_MODE1_MDEU_HMAC         0x00000800
12681 +#define        TALITOS_MODE1_MDEU_PAD          0x00000400      /* PD */
12682 +#define        TALITOS_MODE1_MDEU_MD5          0x00000200
12683 +#define        TALITOS_MODE1_MDEU_SHA256       0x00000100
12684 +#define        TALITOS_MODE1_MDEU_SHA1         0x00000000      /* SHA-160 */
12685 +#define        TALITOS_MODE1_MDEU_MD5_HMAC     \
12686 +       (TALITOS_MODE1_MDEU_MD5 | TALITOS_MODE1_MDEU_HMAC)
12687 +#define        TALITOS_MODE1_MDEU_SHA256_HMAC  \
12688 +       (TALITOS_MODE1_MDEU_SHA256 | TALITOS_MODE1_MDEU_HMAC)
12689 +#define        TALITOS_MODE1_MDEU_SHA1_HMAC    \
12690 +       (TALITOS_MODE1_MDEU_SHA1 | TALITOS_MODE1_MDEU_HMAC)
12691 +
12692 +/* direction of overall data flow (DIR) */
12693 +#define        TALITOS_DIR_OUTBOUND    0x00000000
12694 +#define        TALITOS_DIR_INBOUND     0x00000002
12695 +
12696 +/* done notification (DN) */
12697 +#define        TALITOS_DONE_NOTIFY     0x00000001
12698 +
12699 +/* descriptor types */
12700 +/* odd numbers here are valid on SEC2 and greater only (e.g. ipsec_esp) */
12701 +#define TD_TYPE_AESU_CTR_NONSNOOP      (0 << 3)
12702 +#define TD_TYPE_IPSEC_ESP              (1 << 3)
12703 +#define TD_TYPE_COMMON_NONSNOOP_NO_AFEU        (2 << 3)
12704 +#define TD_TYPE_HMAC_SNOOP_NO_AFEU     (4 << 3)
12705 +
12706 +#define TALITOS_HDR_DONE_BITS  0xff000000
12707 +
12708 +#define        DPRINTF(a...)   do { \
12709 +                                               if (debug) { \
12710 +                                                       printk("%s: ", sc ? \
12711 +                                                               device_get_nameunit(sc->sc_cdev) : "talitos"); \
12712 +                                                       printk(a); \
12713 +                                               } \
12714 +                                       } while (0)
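Editor's note: pulling the header bits above together, the request-processing path builds each descriptor's 32-bit hdr word by OR-ing an EU select, its mode bits, the data direction, the descriptor type and the done-notification flag. A sketch for a cipher-only AES-CBC encrypt request, matching the bits the driver sets for that case (example_aes_cbc_enc_hdr is a hypothetical helper, not in the patch):

    /* editorial sketch only, not part of the patch */
    static u32 example_aes_cbc_enc_hdr(void)
    {
            return TALITOS_DONE_NOTIFY              /* interrupt on completion */
                 | TD_TYPE_COMMON_NONSNOOP_NO_AFEU  /* single-EU descriptor type */
                 | TALITOS_SEL0_AESU                /* primary EU: AES unit */
                 | TALITOS_MODE0_AESU_CBC           /* CBC mode */
                 | TALITOS_MODE0_AESU_ENC           /* encrypt */
                 | TALITOS_DIR_OUTBOUND;            /* outbound (encrypt) data flow */
    }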
12715 --- /dev/null   2007-07-04 13:54:27.000000000 +1000
12716 +++ linux/crypto/ocf/random.c   2007-12-04 13:56:03.000000000 +1000
12717 @@ -0,0 +1,317 @@
12718 +/*
12719 + * A system-independent way of adding entropy to the kernel's pool;
12720 + * this way the drivers can focus on the real work and we can take
12721 + * care of pushing it to the appropriate place in the kernel.
12722 + *
12723 + * This should be fast and callable from timers/interrupts
12724 + *
12725 + * Written by David McCullough <david_mccullough@securecomputing.com>
12726 + * Copyright (C) 2006-2007 David McCullough
12727 + * Copyright (C) 2004-2005 Intel Corporation.
12728 + *
12729 + * LICENSE TERMS
12730 + *
12731 + * The free distribution and use of this software in both source and binary
12732 + * form is allowed (with or without changes) provided that:
12733 + *
12734 + *   1. distributions of this source code include the above copyright
12735 + *      notice, this list of conditions and the following disclaimer;
12736 + *
12737 + *   2. distributions in binary form include the above copyright
12738 + *      notice, this list of conditions and the following disclaimer
12739 + *      in the documentation and/or other associated materials;
12740 + *
12741 + *   3. the copyright holder's name is not used to endorse products
12742 + *      built using this software without specific written permission.
12743 + *
12744 + * ALTERNATIVELY, provided that this notice is retained in full, this product
12745 + * may be distributed under the terms of the GNU General Public License (GPL),
12746 + * in which case the provisions of the GPL apply INSTEAD OF those given above.
12747 + *
12748 + * DISCLAIMER
12749 + *
12750 + * This software is provided 'as is' with no explicit or implied warranties
12751 + * in respect of its properties, including, but not limited to, correctness
12752 + * and/or fitness for purpose.
12753 + */
12754 +
12755 +#ifndef AUTOCONF_INCLUDED
12756 +#include <linux/config.h>
12757 +#endif
12758 +#include <linux/module.h>
12759 +#include <linux/init.h>
12760 +#include <linux/list.h>
12761 +#include <linux/slab.h>
12762 +#include <linux/wait.h>
12763 +#include <linux/sched.h>
12764 +#include <linux/spinlock.h>
12765 +#include <linux/version.h>
12766 +#include <linux/unistd.h>
12767 +#include <linux/poll.h>
12768 +#include <linux/random.h>
12769 +#include <cryptodev.h>
12770 +
12771 +#ifdef CONFIG_OCF_FIPS
12772 +#include "rndtest.h"
12773 +#endif
12774 +
12775 +#ifndef HAS_RANDOM_INPUT_WAIT
12776 +#error "Please do not enable OCF_RANDOMHARVEST unless you have applied the kernel random.c patches"
12777 +#endif
12778 +
12779 +/*
12780 + * a hack to access the debug levels from the crypto driver
12781 + */
12782 +extern int crypto_debug;
12783 +#define debug crypto_debug
12784 +
12785 +/*
12786 + * a list of all registered random providers
12787 + */
12788 +static LIST_HEAD(random_ops);
12789 +static int started = 0;
12790 +static int initted = 0;
12791 +
12792 +struct random_op {
12793 +       struct list_head random_list;
12794 +       u_int32_t driverid;
12795 +       int (*read_random)(void *arg, u_int32_t *buf, int len);
12796 +       void *arg;
12797 +};
12798 +
12799 +static int random_proc(void *arg);
12800 +
12801 +static pid_t           randomproc = (pid_t) -1;
12802 +static spinlock_t      random_lock;
12803 +
12804 +/*
12805 + * just init the spin locks
12806 + */
12807 +static int
12808 +crypto_random_init(void)
12809 +{
12810 +       spin_lock_init(&random_lock);
12811 +       initted = 1;
12812 +       return(0);
12813 +}
12814 +
12815 +/*
12816 + * Add the given random reader to our list (if not present)
12817 + * and start the thread (if not already started)
12818 + *
12819 + * we have to assume that driver id is ok for now
12820 + */
12821 +int
12822 +crypto_rregister(
12823 +       u_int32_t driverid,
12824 +       int (*read_random)(void *arg, u_int32_t *buf, int len),
12825 +       void *arg)
12826 +{
12827 +       unsigned long flags;
12828 +       int ret = 0;
12829 +       struct random_op        *rops, *tmp;
12830 +
12831 +       dprintk("%s,%d: %s(0x%x, %p, %p)\n", __FILE__, __LINE__,
12832 +                       __FUNCTION__, driverid, read_random, arg);
12833 +
12834 +       if (!initted)
12835 +               crypto_random_init();
12836 +
12837 +#if 0
12838 +       struct cryptocap        *cap;
12839 +
12840 +       cap = crypto_checkdriver(driverid);
12841 +       if (!cap)
12842 +               return EINVAL;
12843 +#endif
12844 +
12845 +       list_for_each_entry_safe(rops, tmp, &random_ops, random_list) {
12846 +               if (rops->driverid == driverid && rops->read_random == read_random)
12847 +                       return EEXIST;
12848 +       }
12849 +
12850 +       rops = (struct random_op *) kmalloc(sizeof(*rops), GFP_KERNEL);
12851 +       if (!rops)
12852 +               return ENOMEM;
12853 +
12854 +       rops->driverid    = driverid;
12855 +       rops->read_random = read_random;
12856 +       rops->arg = arg;
12857 +
12858 +       spin_lock_irqsave(&random_lock, flags);
12859 +       list_add_tail(&rops->random_list, &random_ops);
12860 +       if (!started) {
12861 +               randomproc = kernel_thread(random_proc, NULL, CLONE_FS|CLONE_FILES);
12862 +               if (randomproc < 0) {
12863 +                       ret = randomproc;
12864 +                       printk("crypto: crypto_rregister cannot start random thread; "
12865 +                                       "error %d\n", ret);
12866 +               } else
12867 +                       started = 1;
12868 +       }
12869 +       spin_unlock_irqrestore(&random_lock, flags);
12870 +
12871 +       return ret;
12872 +}
12873 +EXPORT_SYMBOL(crypto_rregister);
12874 +
12875 +int
12876 +crypto_runregister_all(u_int32_t driverid)
12877 +{
12878 +       struct random_op *rops, *tmp;
12879 +       unsigned long flags;
12880 +
12881 +       dprintk("%s,%d: %s(0x%x)\n", __FILE__, __LINE__, __FUNCTION__, driverid);
12882 +
12883 +       list_for_each_entry_safe(rops, tmp, &random_ops, random_list) {
12884 +               if (rops->driverid == driverid) {
12885 +                       list_del(&rops->random_list);
12886 +                       kfree(rops);
12887 +               }
12888 +       }
12889 +
12890 +       spin_lock_irqsave(&random_lock, flags);
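+       /*
+        * if that was the last provider, stop the harvest thread; the thread
+        * allows and flushes SIGKILL, so the signal simply wakes it, it then
+        * notices the empty list and exits cleanly.
+        */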
12891 +       if (list_empty(&random_ops) && started)
12892 +               kill_proc(randomproc, SIGKILL, 1);
12893 +       spin_unlock_irqrestore(&random_lock, flags);
12894 +       return(0);
12895 +}
12896 +EXPORT_SYMBOL(crypto_runregister_all);
12897 +
12898 +/*
12899 + * while we can add entropy to random.c, continue to read random data from
12900 + * the drivers and push it to random.c.
12901 + */
12902 +static int
12903 +random_proc(void *arg)
12904 +{
12905 +       int n;
12906 +       int wantcnt;
12907 +       int bufcnt = 0;
12908 +       int retval = 0;
12909 +       int *buf = NULL;
12910 +
12911 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
12912 +       daemonize();
12913 +       spin_lock_irq(&current->sigmask_lock);
12914 +       sigemptyset(&current->blocked);
12915 +       recalc_sigpending(current);
12916 +       spin_unlock_irq(&current->sigmask_lock);
12917 +       sprintf(current->comm, "ocf-random");
12918 +#else
12919 +       daemonize("ocf-random");
12920 +       allow_signal(SIGKILL);
12921 +#endif
12922 +
12923 +       (void) get_fs();
12924 +       set_fs(get_ds());
12925 +
12926 +#ifdef CONFIG_OCF_FIPS
12927 +#define NUM_INT (RNDTEST_NBYTES/sizeof(int))
12928 +#else
12929 +#define NUM_INT 32
12930 +#endif
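+
+/*
+ * NUM_INT sets the size of the harvest buffer in 32-bit words: in FIPS
+ * mode it must hold a complete rndtest block so the whole buffer can be
+ * tested in one pass, otherwise a modest 32 words per pass is enough.
+ */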
12931 +
12932 +       /*
12933 +        * some devices can transfer their RNG data directly into memory,
12934 +        * so make sure the buffer is device (DMA) friendly
12935 +        */
12936 +       buf = kmalloc(NUM_INT * sizeof(int), GFP_DMA);
12937 +       if (NULL == buf) {
12938 +               printk("crypto: RNG could not allocate memory\n");
12939 +               retval = -ENOMEM;
12940 +               goto bad_alloc;
12941 +       }
12942 +
12943 +       wantcnt = NUM_INT;   /* start by adding some entropy */
12944 +
12945 +       /*
12946 +        * it's possible, due to errors or driver removal, that we no longer
12947 +        * have anything to do;  if so exit, or we will consume all the CPU
12948 +        * doing nothing
12949 +        */
12950 +       while (!list_empty(&random_ops)) {
12951 +               struct random_op        *rops, *tmp;
12952 +
12953 +#ifdef CONFIG_OCF_FIPS
12954 +               if (wantcnt)
12955 +                       wantcnt = NUM_INT; /* FIPS mode can do 20000 bits or none */
12956 +#endif
12957 +
12958 +               /* see if we can get enough entropy to make the world
12959 +                * a better place.
12960 +                */
12961 +               while (bufcnt < wantcnt && bufcnt < NUM_INT) {
12962 +                       list_for_each_entry_safe(rops, tmp, &random_ops, random_list) {
12963 +
12964 +                               n = (*rops->read_random)(rops->arg, &buf[bufcnt],
12965 +                                                        NUM_INT - bufcnt);
12966 +
12967 +                               /* on failure remove the random number generator */
12968 +                               if (n == -1) {
12969 +                                       list_del(&rops->random_list);
12970 +                                       printk("crypto: RNG (driverid=0x%x) failed, disabling\n",
12971 +                                                       rops->driverid);
12972 +                                       kfree(rops);
12973 +                               } else if (n > 0)
12974 +                                       bufcnt += n;
12975 +                       }
12976 +                       /* give up CPU for a bit, just in case as this is a loop */
12977 +                       schedule();
12978 +               }
12979 +
12980 +
12981 +#ifdef CONFIG_OCF_FIPS
12982 +               if (bufcnt > 0 && rndtest_buf((unsigned char *) &buf[0])) {
12983 +                       dprintk("crypto: buffer had fips errors, discarding\n");
12984 +                       bufcnt = 0;
12985 +               }
12986 +#endif
12987 +
12988 +               /*
12989 +                * if we have a certified buffer,  we can send some data
12990 +                * to /dev/random and move along
12991 +                */
12992 +               if (bufcnt > 0) {
12993 +                       /* add what we have */
12994 +                       random_input_words(buf, bufcnt, bufcnt*sizeof(int)*8);
12995 +                       bufcnt = 0;
12996 +               }
12997 +
12998 +               /* give up CPU for a bit so we don't hog while filling */
12999 +               schedule();
13000 +
13001 +               /* wait for needing more */
13002 +               wantcnt = random_input_wait();
13003 +
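+               /*
+                * the wait call reports how much more entropy is wanted in bits;
+                * convert that to whole 32-bit words before the next collection
+                * pass so it compares directly against bufcnt and NUM_INT.
+                */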
13004 +               if (wantcnt <= 0)
13005 +                       wantcnt = 0; /* try to get some info again */
13006 +               else
13007 +                       /* round up to one word or we can loop forever */
13008 +                       wantcnt = (wantcnt + (sizeof(int)*8)) / (sizeof(int)*8);
13009 +               if (wantcnt > NUM_INT) {
13010 +                       wantcnt = NUM_INT;
13011 +               }
13012 +
13013 +               if (signal_pending(current)) {
13014 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
13015 +                       spin_lock_irq(&current->sigmask_lock);
13016 +#endif
13017 +                       flush_signals(current);
13018 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
13019 +                       spin_unlock_irq(&current->sigmask_lock);
13020 +#endif
13021 +               }
13022 +       }
13023 +       
13024 +       kfree(buf);
13025 +
13026 +bad_alloc:
13027 +       spin_lock_irq(&random_lock);
13028 +       randomproc = (pid_t) -1;
13029 +       started = 0;
13030 +       spin_unlock_irq(&random_lock);
13031 +
13032 +       return retval;
13033 +}
13034 +
13035 --- /dev/null   2007-07-04 13:54:27.000000000 +1000
13036 +++ linux/crypto/ocf/ocf-bench.c        2007-07-23 22:26:12.000000000 +1000
13037 @@ -0,0 +1,436 @@
13038 +/*
13039 + * A loadable module that benchmarks the OCF crypto speed from kernel space.
13040 + *
13041 + * Copyright (C) 2004-2007 David McCullough <david_mccullough@securecomputing.com>
13042 + *
13043 + * LICENSE TERMS
13044 + *
13045 + * The free distribution and use of this software in both source and binary
13046 + * form is allowed (with or without changes) provided that:
13047 + *
13048 + *   1. distributions of this source code include the above copyright
13049 + *      notice, this list of conditions and the following disclaimer;
13050 + *
13051 + *   2. distributions in binary form include the above copyright
13052 + *      notice, this list of conditions and the following disclaimer
13053 + *      in the documentation and/or other associated materials;
13054 + *
13055 + *   3. the copyright holder's name is not used to endorse products
13056 + *      built using this software without specific written permission.
13057 + *
13058 + * ALTERNATIVELY, provided that this notice is retained in full, this product
13059 + * may be distributed under the terms of the GNU General Public License (GPL),
13060 + * in which case the provisions of the GPL apply INSTEAD OF those given above.
13061 + *
13062 + * DISCLAIMER
13063 + *
13064 + * This software is provided 'as is' with no explicit or implied warranties
13065 + * in respect of its properties, including, but not limited to, correctness
13066 + * and/or fitness for purpose.
13067 + */
13068 +
13069 +
13070 +#ifndef AUTOCONF_INCLUDED
13071 +#include <linux/config.h>
13072 +#endif
13073 +#include <linux/module.h>
13074 +#include <linux/init.h>
13075 +#include <linux/list.h>
13076 +#include <linux/slab.h>
13077 +#include <linux/wait.h>
13078 +#include <linux/sched.h>
13079 +#include <linux/spinlock.h>
13080 +#include <linux/version.h>
13081 +#include <linux/interrupt.h>
13082 +#include <cryptodev.h>
13083 +
13084 +#ifdef I_HAVE_AN_XSCALE_WITH_INTEL_SDK
13085 +#define BENCH_IXP_ACCESS_LIB 1
13086 +#endif
13087 +#ifdef BENCH_IXP_ACCESS_LIB
13088 +#include <IxTypes.h>
13089 +#include <IxOsBuffMgt.h>
13090 +#include <IxNpeDl.h>
13091 +#include <IxCryptoAcc.h>
13092 +#include <IxQMgr.h>
13093 +#include <IxOsServices.h>
13094 +#include <IxOsCacheMMU.h>
13095 +#endif
13096 +
13097 +/*
13098 + * support for access lib version 1.4
13099 + */
13100 +#ifndef IX_MBUF_PRIV
13101 +#define IX_MBUF_PRIV(x) ((x)->priv)
13102 +#endif
13103 +
13104 +/*
13105 + * the number of simultaneously active requests
13106 + */
13107 +static int request_q_len = 20;
13108 +module_param(request_q_len, int, 0);
13109 +MODULE_PARM_DESC(request_q_len, "Number of outstanding requests");
13110 +/*
13111 + * how many requests we want to have processed
13112 + */
13113 +static int request_num = 1024;
13114 +module_param(request_num, int, 0);
13115 +MODULE_PARM_DESC(request_num, "run for at least this many requests");
13116 +/*
13117 + * the size of each request
13118 + */
13119 +static int request_size = 1500;
13120 +module_param(request_size, int, 0);
13121 +MODULE_PARM_DESC(request_size, "size of each request");
13122 +
13123 +/*
13124 + * a structure for each request
13125 + */
13126 +typedef struct  {
13127 +       struct work_struct work;
13128 +#ifdef BENCH_IXP_ACCESS_LIB
13129 +       IX_MBUF mbuf;
13130 +#endif
13131 +       unsigned char *buffer;
13132 +} request_t;
13133 +
13134 +static request_t *requests;
13135 +
13136 +static int outstanding;
13137 +static int total;
13138 +
13139 +/*************************************************************************/
13140 +/*
13141 + * OCF benchmark routines
13142 + */
13143 +
13144 +static uint64_t ocf_cryptoid;
13145 +static int ocf_init(void);
13146 +static int ocf_cb(struct cryptop *crp);
13147 +static void ocf_request(void *arg);
13148 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
13149 +static void ocf_request_wq(struct work_struct *work);
13150 +#endif
13151 +
13152 +static int
13153 +ocf_init(void)
13154 +{
13155 +       int error;
13156 +       struct cryptoini crie, cria;
13157 +       struct cryptodesc crda, crde;
13158 +
13159 +       memset(&crie, 0, sizeof(crie));
13160 +       memset(&cria, 0, sizeof(cria));
13161 +       memset(&crde, 0, sizeof(crde));
13162 +       memset(&crda, 0, sizeof(crda));
13163 +
13164 +       cria.cri_alg  = CRYPTO_SHA1_HMAC;
13165 +       cria.cri_klen = 20 * 8;
13166 +       cria.cri_key  = "0123456789abcdefghij";
13167 +
13168 +       crie.cri_alg  = CRYPTO_3DES_CBC;
13169 +       crie.cri_klen = 24 * 8;
13170 +       crie.cri_key  = "0123456789abcdefghijklmn";
13171 +
13172 +       crie.cri_next = &cria;
13173 +
13174 +       error = crypto_newsession(&ocf_cryptoid, &crie, 0);
13175 +       if (error) {
13176 +               printk("crypto_newsession failed %d\n", error);
13177 +               return -1;
13178 +       }
13179 +       return 0;
13180 +}
13181 +
13182 +static int
13183 +ocf_cb(struct cryptop *crp)
13184 +{
13185 +       request_t *r = (request_t *) crp->crp_opaque;
13186 +
13187 +       if (crp->crp_etype)
13188 +               printk("Error in OCF processing: %d\n", crp->crp_etype);
13189 +       total++;
13190 +       crypto_freereq(crp);
13191 +       crp = NULL;
13192 +
13193 +       if (total > request_num) {
13194 +               outstanding--;
13195 +               return 0;
13196 +       }
13197 +
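+       /*
+        * the request is resubmitted via a workqueue rather than directly,
+        * presumably so the next crypto_dispatch() runs from process context
+        * (this callback may be invoked from the driver's completion path).
+        */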
13198 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
13199 +       INIT_WORK(&r->work, ocf_request_wq);
13200 +#else
13201 +       INIT_WORK(&r->work, ocf_request, r);
13202 +#endif
13203 +       schedule_work(&r->work);
13204 +       return 0;
13205 +}
13206 +
13207 +
13208 +static void
13209 +ocf_request(void *arg)
13210 +{
13211 +       request_t *r = arg;
13212 +       struct cryptop *crp = crypto_getreq(2);
13213 +       struct cryptodesc *crde, *crda;
13214 +
13215 +       if (!crp) {
13216 +               outstanding--;
13217 +               return;
13218 +       }
13219 +
13220 +       crde = crp->crp_desc;
13221 +       crda = crde->crd_next;
13222 +
13223 +       crda->crd_skip = 0;
13224 +       crda->crd_flags = 0;
13225 +       crda->crd_len = request_size;
13226 +       crda->crd_inject = request_size;
13227 +       crda->crd_alg = CRYPTO_SHA1_HMAC;
13228 +       crda->crd_key = "0123456789abcdefghij";
13229 +       crda->crd_klen = 20 * 8;
13230 +
13231 +       crde->crd_skip = 0;
13232 +       crde->crd_flags = CRD_F_IV_EXPLICIT | CRD_F_ENCRYPT;
13233 +       crde->crd_len = request_size;
13234 +       crde->crd_inject = request_size;
13235 +       crde->crd_alg = CRYPTO_3DES_CBC;
13236 +       crde->crd_key = "0123456789abcdefghijklmn";
13237 +       crde->crd_klen = 24 * 8;
13238 +
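+       /*
+        * the digest is injected at offset request_size (just past the
+        * payload), so crp_ilen reserves 64 extra bytes beyond the payload;
+        * the buffers allocated in ocfbench_init() are 128 bytes larger.
+        */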
13239 +       crp->crp_ilen = request_size + 64;
13240 +       crp->crp_flags = CRYPTO_F_CBIMM;
13241 +       crp->crp_buf = (caddr_t) r->buffer;
13242 +       crp->crp_callback = ocf_cb;
13243 +       crp->crp_sid = ocf_cryptoid;
13244 +       crp->crp_opaque = (caddr_t) r;
13245 +       crypto_dispatch(crp);
13246 +}
13247 +
13248 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
13249 +static void
13250 +ocf_request_wq(struct work_struct *work)
13251 +{
13252 +       request_t *r = container_of(work, request_t, work);
13253 +       ocf_request(r);
13254 +}
13255 +#endif
13256 +
13257 +/*************************************************************************/
13258 +#ifdef BENCH_IXP_ACCESS_LIB
13259 +/*************************************************************************/
13260 +/*
13261 + * CryptoAcc benchmark routines
13262 + */
13263 +
13264 +static IxCryptoAccCtx ixp_ctx;
13265 +static UINT32 ixp_ctx_id;
13266 +static IX_MBUF ixp_pri;
13267 +static IX_MBUF ixp_sec;
13268 +static int ixp_registered = 0;
13269 +
13270 +static void ixp_register_cb(UINT32 ctx_id, IX_MBUF *bufp,
13271 +                                       IxCryptoAccStatus status);
13272 +static void ixp_perform_cb(UINT32 ctx_id, IX_MBUF *sbufp, IX_MBUF *dbufp,
13273 +                                       IxCryptoAccStatus status);
13274 +static void ixp_request(void *arg);
13275 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
13276 +static void ixp_request_wq(struct work_struct *work);
13277 +#endif
13278 +
13279 +static int
13280 +ixp_init(void)
13281 +{
13282 +       IxCryptoAccStatus status;
13283 +
13284 +       ixp_ctx.cipherCtx.cipherAlgo = IX_CRYPTO_ACC_CIPHER_3DES;
13285 +       ixp_ctx.cipherCtx.cipherMode = IX_CRYPTO_ACC_MODE_CBC;
13286 +       ixp_ctx.cipherCtx.cipherKeyLen = 24;
13287 +       ixp_ctx.cipherCtx.cipherBlockLen = IX_CRYPTO_ACC_DES_BLOCK_64;
13288 +       ixp_ctx.cipherCtx.cipherInitialVectorLen = IX_CRYPTO_ACC_DES_IV_64;
13289 +       memcpy(ixp_ctx.cipherCtx.key.cipherKey, "0123456789abcdefghijklmn", 24);
13290 +
13291 +       ixp_ctx.authCtx.authAlgo = IX_CRYPTO_ACC_AUTH_SHA1;
13292 +       ixp_ctx.authCtx.authDigestLen = 12;
13293 +       ixp_ctx.authCtx.aadLen = 0;
13294 +       ixp_ctx.authCtx.authKeyLen = 20;
13295 +       memcpy(ixp_ctx.authCtx.key.authKey, "0123456789abcdefghij", 20);
13296 +
13297 +       ixp_ctx.useDifferentSrcAndDestMbufs = 0;
13298 +       ixp_ctx.operation = IX_CRYPTO_ACC_OP_ENCRYPT_AUTH ;
13299 +
13300 +       IX_MBUF_MLEN(&ixp_pri)  = IX_MBUF_PKT_LEN(&ixp_pri) = 128;
13301 +       IX_MBUF_MDATA(&ixp_pri) = (unsigned char *) kmalloc(128, SLAB_ATOMIC);
13302 +       IX_MBUF_MLEN(&ixp_sec)  = IX_MBUF_PKT_LEN(&ixp_sec) = 128;
13303 +       IX_MBUF_MDATA(&ixp_sec) = (unsigned char *) kmalloc(128, SLAB_ATOMIC);
13304 +
13305 +       status = ixCryptoAccCtxRegister(&ixp_ctx, &ixp_pri, &ixp_sec,
13306 +                       ixp_register_cb, ixp_perform_cb, &ixp_ctx_id);
13307 +
13308 +       if (IX_CRYPTO_ACC_STATUS_SUCCESS == status) {
13309 +               while (!ixp_registered)
13310 +                       schedule();
13311 +               return ixp_registered < 0 ? -1 : 0;
13312 +       }
13313 +
13314 +       printk("ixp: ixCryptoAccCtxRegister failed %d\n", status);
13315 +       return -1;
13316 +}
13317 +
13318 +static void
13319 +ixp_register_cb(UINT32 ctx_id, IX_MBUF *bufp, IxCryptoAccStatus status)
13320 +{
13321 +       if (bufp) {
13322 +               IX_MBUF_MLEN(bufp) = IX_MBUF_PKT_LEN(bufp) = 0;
13323 +               kfree(IX_MBUF_MDATA(bufp));
13324 +               IX_MBUF_MDATA(bufp) = NULL;
13325 +       }
13326 +
13327 +       if (IX_CRYPTO_ACC_STATUS_WAIT == status)
13328 +               return;
13329 +       if (IX_CRYPTO_ACC_STATUS_SUCCESS == status)
13330 +               ixp_registered = 1;
13331 +       else
13332 +               ixp_registered = -1;
13333 +}
13334 +
13335 +static void
13336 +ixp_perform_cb(
13337 +       UINT32 ctx_id,
13338 +       IX_MBUF *sbufp,
13339 +       IX_MBUF *dbufp,
13340 +       IxCryptoAccStatus status)
13341 +{
13342 +       request_t *r = NULL;
13343 +
13344 +       total++;
13345 +       if (total > request_num) {
13346 +               outstanding--;
13347 +               return;
13348 +       }
13349 +
13350 +       if (!sbufp || !(r = IX_MBUF_PRIV(sbufp))) {
13351 +               printk("crappo %p %p\n", sbufp, r);
13352 +               outstanding--;
13353 +               return;
13354 +       }
13355 +
13356 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
13357 +       INIT_WORK(&r->work, ixp_request_wq);
13358 +#else
13359 +       INIT_WORK(&r->work, ixp_request, r);
13360 +#endif
13361 +       schedule_work(&r->work);
13362 +}
13363 +
13364 +static void
13365 +ixp_request(void *arg)
13366 +{
13367 +       request_t *r = arg;
13368 +       IxCryptoAccStatus status;
13369 +
13370 +       memset(&r->mbuf, 0, sizeof(r->mbuf));
13371 +       IX_MBUF_MLEN(&r->mbuf) = IX_MBUF_PKT_LEN(&r->mbuf) = request_size + 64;
13372 +       IX_MBUF_MDATA(&r->mbuf) = r->buffer;
13373 +       IX_MBUF_PRIV(&r->mbuf) = r;
13374 +       status = ixCryptoAccAuthCryptPerform(ixp_ctx_id, &r->mbuf, NULL,
13375 +                       0, request_size, 0, request_size, request_size, r->buffer);
13376 +       if (IX_CRYPTO_ACC_STATUS_SUCCESS != status) {
13377 +               printk("status1 = %d\n", status);
13378 +               outstanding--;
13379 +               return;
13380 +       }
13381 +       return;
13382 +}
13383 +
13384 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
13385 +static void
13386 +ixp_request_wq(struct work_struct *work)
13387 +{
13388 +       request_t *r = container_of(work, request_t, work);
13389 +       ixp_request(r);
13390 +}
13391 +#endif
13392 +
13393 +/*************************************************************************/
13394 +#endif /* BENCH_IXP_ACCESS_LIB */
13395 +/*************************************************************************/
13396 +
13397 +int
13398 +ocfbench_init(void)
13399 +{
13400 +       int i, jstart, jstop;
13401 +
13402 +       printk("Crypto Speed tests\n");
13403 +
13404 +       requests = kmalloc(sizeof(request_t) * request_q_len, GFP_KERNEL);
13405 +       if (!requests) {
13406 +               printk("malloc failed\n");
13407 +               return -EINVAL;
13408 +       }
13409 +
13410 +       for (i = 0; i < request_q_len; i++) {
13411 +               /* +128 for return data */
13412 +               requests[i].buffer = kmalloc(request_size + 128, GFP_DMA);
13413 +               if (!requests[i].buffer) {
13414 +                       printk("malloc failed\n");
13415 +                       return -EINVAL;
13416 +               }
13417 +               memset(requests[i].buffer, '0' + i, request_size + 128);
13418 +       }
13419 +
13420 +       /*
13421 +        * OCF benchmark
13422 +        */
13423 +       printk("OCF: testing ...\n");
13424 +       ocf_init();
13425 +       total = outstanding = 0;
13426 +       jstart = jiffies;
13427 +       for (i = 0; i < request_q_len; i++) {
13428 +               outstanding++;
13429 +               ocf_request(&requests[i]);
13430 +       }
13431 +       while (outstanding > 0)
13432 +               schedule();
13433 +       jstop = jiffies;
13434 +
13435 +       printk("OCF: %d requests of %d bytes in %d jiffies\n", total, request_size,
13436 +                       jstop - jstart);
13437 +
13438 +#ifdef BENCH_IXP_ACCESS_LIB
13439 +       /*
13440 +        * IXP benchmark
13441 +        */
13442 +       printk("IXP: testing ...\n");
13443 +       ixp_init();
13444 +       total = outstanding = 0;
13445 +       jstart = jiffies;
13446 +       for (i = 0; i < request_q_len; i++) {
13447 +               outstanding++;
13448 +               ixp_request(&requests[i]);
13449 +       }
13450 +       while (outstanding > 0)
13451 +               schedule();
13452 +       jstop = jiffies;
13453 +
13454 +       printk("IXP: %d requests of %d bytes in %d jiffies\n", total, request_size,
13455 +                       jstop - jstart);
13456 +#endif /* BENCH_IXP_ACCESS_LIB */
13457 +
13458 +       for (i = 0; i < request_q_len; i++)
13459 +               kfree(requests[i].buffer);
13460 +       kfree(requests);
13461 +       return -EINVAL; /* always fail to load so it can be re-run quickly ;-) */
13462 +}
13463 +
13464 +static void __exit ocfbench_exit(void)
13465 +{
13466 +}
13467 +
13468 +module_init(ocfbench_init);
13469 +module_exit(ocfbench_exit);
13470 +
13471 +MODULE_LICENSE("BSD");
13472 +MODULE_AUTHOR("David McCullough <david_mccullough@securecomputing.com>");
13473 +MODULE_DESCRIPTION("Benchmark various in-kernel crypto speeds");
13474 --- /dev/null   2007-07-04 13:54:27.000000000 +1000
13475 +++ linux/crypto/ocf/ixp4xx/ixp4xx.c    2008-07-03 10:28:05.000000000 +1000
13476 @@ -0,0 +1,1328 @@
13477 +/*
13478 + * An OCF module that uses Intel's IXP CryptoAcc API to do the crypto.
13479 + * This driver requires the IXP400 Access Library that is available
13480 + * from Intel in order to operate (or compile).
13481 + *
13482 + * Written by David McCullough <david_mccullough@securecomputing.com>
13483 + * Copyright (C) 2006-2007 David McCullough
13484 + * Copyright (C) 2004-2005 Intel Corporation.
13485 + *
13486 + * LICENSE TERMS
13487 + *
13488 + * The free distribution and use of this software in both source and binary
13489 + * form is allowed (with or without changes) provided that:
13490 + *
13491 + *   1. distributions of this source code include the above copyright
13492 + *      notice, this list of conditions and the following disclaimer;
13493 + *
13494 + *   2. distributions in binary form include the above copyright
13495 + *      notice, this list of conditions and the following disclaimer
13496 + *      in the documentation and/or other associated materials;
13497 + *
13498 + *   3. the copyright holder's name is not used to endorse products
13499 + *      built using this software without specific written permission.
13500 + *
13501 + * ALTERNATIVELY, provided that this notice is retained in full, this product
13502 + * may be distributed under the terms of the GNU General Public License (GPL),
13503 + * in which case the provisions of the GPL apply INSTEAD OF those given above.
13504 + *
13505 + * DISCLAIMER
13506 + *
13507 + * This software is provided 'as is' with no explicit or implied warranties
13508 + * in respect of its properties, including, but not limited to, correctness
13509 + * and/or fitness for purpose.
13510 + */
13511 +
13512 +#ifndef AUTOCONF_INCLUDED
13513 +#include <linux/config.h>
13514 +#endif
13515 +#include <linux/module.h>
13516 +#include <linux/init.h>
13517 +#include <linux/list.h>
13518 +#include <linux/slab.h>
13519 +#include <linux/sched.h>
13520 +#include <linux/wait.h>
13521 +#include <linux/crypto.h>
13522 +#include <linux/interrupt.h>
13523 +#include <asm/scatterlist.h>
13524 +
13525 +#include <IxTypes.h>
13526 +#include <IxOsBuffMgt.h>
13527 +#include <IxNpeDl.h>
13528 +#include <IxCryptoAcc.h>
13529 +#include <IxQMgr.h>
13530 +#include <IxOsServices.h>
13531 +#include <IxOsCacheMMU.h>
13532 +
13533 +#include <cryptodev.h>
13534 +#include <uio.h>
13535 +
13536 +#ifndef IX_MBUF_PRIV
13537 +#define IX_MBUF_PRIV(x) ((x)->priv)
13538 +#endif
13539 +
13540 +struct ixp_data;
13541 +
13542 +struct ixp_q {
13543 +       struct list_head         ixp_q_list;
13544 +       struct ixp_data         *ixp_q_data;
13545 +       struct cryptop          *ixp_q_crp;
13546 +       struct cryptodesc       *ixp_q_ccrd;
13547 +       struct cryptodesc       *ixp_q_acrd;
13548 +       IX_MBUF                          ixp_q_mbuf;
13549 +       UINT8                           *ixp_hash_dest; /* Location for hash in client buffer */
13550 +       UINT8                           *ixp_hash_src; /* Location of hash in internal buffer */
13551 +       unsigned char            ixp_q_iv_data[IX_CRYPTO_ACC_MAX_CIPHER_IV_LENGTH];
13552 +       unsigned char           *ixp_q_iv;
13553 +};
13554 +
13555 +struct ixp_data {
13556 +       int                                      ixp_registered;        /* is the context registered */
13557 +       int                                      ixp_crd_flags;         /* detect direction changes */
13558 +
13559 +       int                                      ixp_cipher_alg;
13560 +       int                                      ixp_auth_alg;
13561 +
13562 +       UINT32                           ixp_ctx_id;
13563 +       UINT32                           ixp_hash_key_id;       /* used when hashing */
13564 +       IxCryptoAccCtx           ixp_ctx;
13565 +       IX_MBUF                          ixp_pri_mbuf;
13566 +       IX_MBUF                          ixp_sec_mbuf;
13567 +
13568 +       struct work_struct   ixp_pending_work;
13569 +       struct work_struct   ixp_registration_work;
13570 +       struct list_head         ixp_q;                         /* unprocessed requests */
13571 +};
13572 +
13573 +#ifdef __ixp46X
13574 +
13575 +#define        MAX_IOP_SIZE    64      /* words */
13576 +#define        MAX_OOP_SIZE    128
13577 +
13578 +#define        MAX_PARAMS              3
13579 +
13580 +struct ixp_pkq {
13581 +       struct list_head                         pkq_list;
13582 +       struct cryptkop                         *pkq_krp;
13583 +
13584 +       IxCryptoAccPkeEauInOperands      pkq_op;
13585 +       IxCryptoAccPkeEauOpResult        pkq_result;
13586 +
13587 +       UINT32                                           pkq_ibuf0[MAX_IOP_SIZE];
13588 +       UINT32                                           pkq_ibuf1[MAX_IOP_SIZE];
13589 +       UINT32                                           pkq_ibuf2[MAX_IOP_SIZE];
13590 +       UINT32                                           pkq_obuf[MAX_OOP_SIZE];
13591 +};
13592 +
13593 +static LIST_HEAD(ixp_pkq); /* current PK wait list */
13594 +static struct ixp_pkq *ixp_pk_cur;
13595 +static spinlock_t ixp_pkq_lock;
13596 +
13597 +#endif /* __ixp46X */
13598 +
13599 +static int ixp_blocked = 0;
13600 +
13601 +static int32_t                  ixp_id = -1;
13602 +static struct ixp_data **ixp_sessions = NULL;
13603 +static u_int32_t                ixp_sesnum = 0;
13604 +
13605 +static int ixp_process(device_t, struct cryptop *, int);
13606 +static int ixp_newsession(device_t, u_int32_t *, struct cryptoini *);
13607 +static int ixp_freesession(device_t, u_int64_t);
13608 +#ifdef __ixp46X
13609 +static int ixp_kprocess(device_t, struct cryptkop *krp, int hint);
13610 +#endif
13611 +
13612 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
13613 +static kmem_cache_t *qcache;
13614 +#else
13615 +static struct kmem_cache *qcache;
13616 +#endif
13617 +
13618 +#define debug ixp_debug
13619 +static int ixp_debug = 0;
13620 +module_param(ixp_debug, int, 0644);
13621 +MODULE_PARM_DESC(ixp_debug, "Enable debug");
13622 +
13623 +static int ixp_init_crypto = 1;
13624 +module_param(ixp_init_crypto, int, 0444); /* RO after load/boot */
13625 +MODULE_PARM_DESC(ixp_init_crypto, "Call ixCryptoAccInit (default is 1)");
13626 +
13627 +static void ixp_process_pending(void *arg);
13628 +static void ixp_registration(void *arg);
13629 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
13630 +static void ixp_process_pending_wq(struct work_struct *work);
13631 +static void ixp_registration_wq(struct work_struct *work);
13632 +#endif
13633 +
13634 +/*
13635 + * dummy device structure
13636 + */
13637 +
13638 +static struct {
13639 +       softc_device_decl       sc_dev;
13640 +} ixpdev;
13641 +
13642 +static device_method_t ixp_methods = {
13643 +       /* crypto device methods */
13644 +       DEVMETHOD(cryptodev_newsession, ixp_newsession),
13645 +       DEVMETHOD(cryptodev_freesession,ixp_freesession),
13646 +       DEVMETHOD(cryptodev_process,    ixp_process),
13647 +#ifdef __ixp46X
13648 +       DEVMETHOD(cryptodev_kprocess,   ixp_kprocess),
13649 +#endif
13650 +};
13651 +
13652 +/*
13653 + * Generate a new software session.
13654 + */
13655 +static int
13656 +ixp_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
13657 +{
13658 +       struct ixp_data *ixp;
13659 +       u_int32_t i;
13660 +#define AUTH_LEN(cri, def) \
13661 +       (cri->cri_mlen ? cri->cri_mlen : (def))
13662 +
13663 +       dprintk("%s():alg %d\n", __FUNCTION__,cri->cri_alg);
13664 +       if (sid == NULL || cri == NULL) {
13665 +               dprintk("%s,%d - EINVAL\n", __FILE__, __LINE__);
13666 +               return EINVAL;
13667 +       }
13668 +
13669 +       if (ixp_sessions) {
13670 +               for (i = 1; i < ixp_sesnum; i++)
13671 +                       if (ixp_sessions[i] == NULL)
13672 +                               break;
13673 +       } else
13674 +               i = 1;          /* NB: to silence compiler warning */
13675 +
13676 +       if (ixp_sessions == NULL || i == ixp_sesnum) {
13677 +               struct ixp_data **ixpd;
13678 +
13679 +               if (ixp_sessions == NULL) {
13680 +                       i = 1; /* We leave ixp_sessions[0] empty */
13681 +                       ixp_sesnum = CRYPTO_SW_SESSIONS;
13682 +               } else
13683 +                       ixp_sesnum *= 2;
13684 +
13685 +               ixpd = kmalloc(ixp_sesnum * sizeof(struct ixp_data *), SLAB_ATOMIC);
13686 +               if (ixpd == NULL) {
13687 +                       /* Reset session number */
13688 +                       if (ixp_sesnum == CRYPTO_SW_SESSIONS)
13689 +                               ixp_sesnum = 0;
13690 +                       else
13691 +                               ixp_sesnum /= 2;
13692 +                       dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
13693 +                       return ENOBUFS;
13694 +               }
13695 +               memset(ixpd, 0, ixp_sesnum * sizeof(struct ixp_data *));
13696 +
13697 +               /* Copy existing sessions */
13698 +               if (ixp_sessions) {
13699 +                       memcpy(ixpd, ixp_sessions,
13700 +                           (ixp_sesnum / 2) * sizeof(struct ixp_data *));
13701 +                       kfree(ixp_sessions);
13702 +               }
13703 +
13704 +               ixp_sessions = ixpd;
13705 +       }
13706 +
13707 +       ixp_sessions[i] = (struct ixp_data *) kmalloc(sizeof(struct ixp_data),
13708 +                       SLAB_ATOMIC);
13709 +       if (ixp_sessions[i] == NULL) {
13710 +               ixp_freesession(NULL, i);
13711 +               dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
13712 +               return ENOBUFS;
13713 +       }
13714 +
13715 +       *sid = i;
13716 +
13717 +       ixp = ixp_sessions[i];
13718 +       memset(ixp, 0, sizeof(*ixp));
13719 +
13720 +       ixp->ixp_cipher_alg = -1;
13721 +       ixp->ixp_auth_alg = -1;
13722 +       ixp->ixp_ctx_id = -1;
13723 +       INIT_LIST_HEAD(&ixp->ixp_q);
13724 +
13725 +       ixp->ixp_ctx.useDifferentSrcAndDestMbufs = 0;
13726 +
13727 +       while (cri) {
13728 +               switch (cri->cri_alg) {
13729 +               case CRYPTO_DES_CBC:
13730 +                       ixp->ixp_cipher_alg = cri->cri_alg;
13731 +                       ixp->ixp_ctx.cipherCtx.cipherAlgo = IX_CRYPTO_ACC_CIPHER_DES;
13732 +                       ixp->ixp_ctx.cipherCtx.cipherMode = IX_CRYPTO_ACC_MODE_CBC;
13733 +                       ixp->ixp_ctx.cipherCtx.cipherKeyLen = (cri->cri_klen + 7) / 8;
13734 +                       ixp->ixp_ctx.cipherCtx.cipherBlockLen = IX_CRYPTO_ACC_DES_BLOCK_64;
13735 +                       ixp->ixp_ctx.cipherCtx.cipherInitialVectorLen =
13736 +                                               IX_CRYPTO_ACC_DES_IV_64;
13737 +                       memcpy(ixp->ixp_ctx.cipherCtx.key.cipherKey,
13738 +                                       cri->cri_key, (cri->cri_klen + 7) / 8);
13739 +                       break;
13740 +
13741 +               case CRYPTO_3DES_CBC:
13742 +                       ixp->ixp_cipher_alg = cri->cri_alg;
13743 +                       ixp->ixp_ctx.cipherCtx.cipherAlgo = IX_CRYPTO_ACC_CIPHER_3DES;
13744 +                       ixp->ixp_ctx.cipherCtx.cipherMode = IX_CRYPTO_ACC_MODE_CBC;
13745 +                       ixp->ixp_ctx.cipherCtx.cipherKeyLen = (cri->cri_klen + 7) / 8;
13746 +                       ixp->ixp_ctx.cipherCtx.cipherBlockLen = IX_CRYPTO_ACC_DES_BLOCK_64;
13747 +                       ixp->ixp_ctx.cipherCtx.cipherInitialVectorLen =
13748 +                                               IX_CRYPTO_ACC_DES_IV_64;
13749 +                       memcpy(ixp->ixp_ctx.cipherCtx.key.cipherKey,
13750 +                                       cri->cri_key, (cri->cri_klen + 7) / 8);
13751 +                       break;
13752 +
13753 +               case CRYPTO_RIJNDAEL128_CBC:
13754 +                       ixp->ixp_cipher_alg = cri->cri_alg;
13755 +                       ixp->ixp_ctx.cipherCtx.cipherAlgo = IX_CRYPTO_ACC_CIPHER_AES;
13756 +                       ixp->ixp_ctx.cipherCtx.cipherMode = IX_CRYPTO_ACC_MODE_CBC;
13757 +                       ixp->ixp_ctx.cipherCtx.cipherKeyLen = (cri->cri_klen + 7) / 8;
13758 +                       ixp->ixp_ctx.cipherCtx.cipherBlockLen = 16;
13759 +                       ixp->ixp_ctx.cipherCtx.cipherInitialVectorLen = 16;
13760 +                       memcpy(ixp->ixp_ctx.cipherCtx.key.cipherKey,
13761 +                                       cri->cri_key, (cri->cri_klen + 7) / 8);
13762 +                       break;
13763 +
13764 +               case CRYPTO_MD5:
13765 +               case CRYPTO_MD5_HMAC:
13766 +                       ixp->ixp_auth_alg = cri->cri_alg;
13767 +                       ixp->ixp_ctx.authCtx.authAlgo = IX_CRYPTO_ACC_AUTH_MD5;
13768 +                       ixp->ixp_ctx.authCtx.authDigestLen = AUTH_LEN(cri, MD5_HASH_LEN);
13769 +                       ixp->ixp_ctx.authCtx.aadLen = 0;
13770 +                       /* Only MD5_HMAC needs a key */
13771 +                       if (cri->cri_alg == CRYPTO_MD5_HMAC) {
13772 +                               ixp->ixp_ctx.authCtx.authKeyLen = (cri->cri_klen + 7) / 8;
13773 +                               if (ixp->ixp_ctx.authCtx.authKeyLen >
13774 +                                               sizeof(ixp->ixp_ctx.authCtx.key.authKey)) {
13775 +                                       printk(
13776 +                                               "ixp4xx: Invalid key length for MD5_HMAC - %d bits\n",
13777 +                                                       cri->cri_klen);
13778 +                                       ixp_freesession(NULL, i);
13779 +                                       return EINVAL;
13780 +                               }
13781 +                               memcpy(ixp->ixp_ctx.authCtx.key.authKey,
13782 +                                               cri->cri_key, (cri->cri_klen + 7) / 8);
13783 +                       }
13784 +                       break;
13785 +
13786 +               case CRYPTO_SHA1:
13787 +               case CRYPTO_SHA1_HMAC:
13788 +                       ixp->ixp_auth_alg = cri->cri_alg;
13789 +                       ixp->ixp_ctx.authCtx.authAlgo = IX_CRYPTO_ACC_AUTH_SHA1;
13790 +                       ixp->ixp_ctx.authCtx.authDigestLen = AUTH_LEN(cri, SHA1_HASH_LEN);
13791 +                       ixp->ixp_ctx.authCtx.aadLen = 0;
13792 +                       /* Only SHA1_HMAC needs a key */
13793 +                       if (cri->cri_alg == CRYPTO_SHA1_HMAC) {
13794 +                               ixp->ixp_ctx.authCtx.authKeyLen = (cri->cri_klen + 7) / 8;
13795 +                               if (ixp->ixp_ctx.authCtx.authKeyLen >
13796 +                                               sizeof(ixp->ixp_ctx.authCtx.key.authKey)) {
13797 +                                       printk(
13798 +                                               "ixp4xx: Invalid key length for SHA1_HMAC - %d bits\n",
13799 +                                                       cri->cri_klen);
13800 +                                       ixp_freesession(NULL, i);
13801 +                                       return EINVAL;
13802 +                               }
13803 +                               memcpy(ixp->ixp_ctx.authCtx.key.authKey,
13804 +                                               cri->cri_key, (cri->cri_klen + 7) / 8);
13805 +                       }
13806 +                       break;
13807 +
13808 +               default:
13809 +                       printk("ixp: unknown algo 0x%x\n", cri->cri_alg);
13810 +                       ixp_freesession(NULL, i);
13811 +                       return EINVAL;
13812 +               }
13813 +               cri = cri->cri_next;
13814 +       }
13815 +
13816 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
13817 +       INIT_WORK(&ixp->ixp_pending_work, ixp_process_pending_wq);
13818 +       INIT_WORK(&ixp->ixp_registration_work, ixp_registration_wq);
13819 +#else
13820 +       INIT_WORK(&ixp->ixp_pending_work, ixp_process_pending, ixp);
13821 +       INIT_WORK(&ixp->ixp_registration_work, ixp_registration, ixp);
13822 +#endif
13823 +
13824 +       return 0;
13825 +}
13826 +
13827 +
13828 +/*
13829 + * Free a session.
13830 + */
13831 +static int
13832 +ixp_freesession(device_t dev, u_int64_t tid)
13833 +{
13834 +       u_int32_t sid = CRYPTO_SESID2LID(tid);
13835 +
13836 +       dprintk("%s()\n", __FUNCTION__);
13837 +       if (sid >= ixp_sesnum || ixp_sessions == NULL ||
13838 +                       ixp_sessions[sid] == NULL) {
13839 +               dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
13840 +               return EINVAL;
13841 +       }
13842 +
13843 +       /* Silently accept and return */
13844 +       if (sid == 0)
13845 +               return 0;
13846 +
13847 +       if (ixp_sessions[sid]) {
13848 +               if (ixp_sessions[sid]->ixp_ctx_id != -1) {
13849 +                       ixCryptoAccCtxUnregister(ixp_sessions[sid]->ixp_ctx_id);
13850 +                       ixp_sessions[sid]->ixp_ctx_id = -1;
13851 +               }
13852 +
13853 +               flush_scheduled_work();
13854 +
13855 +               kfree(ixp_sessions[sid]);
13856 +       }
13857 +       ixp_sessions[sid] = NULL;
13858 +       if (ixp_blocked) {
13859 +               ixp_blocked = 0;
13860 +               crypto_unblock(ixp_id, CRYPTO_SYMQ);
13861 +       }
13862 +       return 0;
13863 +}
13864 +
13865 +
13866 +/*
13867 + * callback for when hash processing is complete
13868 + */
13869 +
13870 +static void
13871 +ixp_hash_perform_cb(
13872 +       UINT32 hash_key_id,
13873 +       IX_MBUF *bufp,
13874 +       IxCryptoAccStatus status)
13875 +{
13876 +       struct ixp_q *q;
13877 +
13878 +       dprintk("%s(%u, %p, 0x%x)\n", __FUNCTION__, hash_key_id, bufp, status);
13879 +
13880 +       if (bufp == NULL) {
13881 +               printk("ixp: NULL buf in %s\n", __FUNCTION__);
13882 +               return;
13883 +       }
13884 +
13885 +       q = IX_MBUF_PRIV(bufp);
13886 +       if (q == NULL) {
13887 +               printk("ixp: NULL priv in %s\n", __FUNCTION__);
13888 +               return;
13889 +       }
13890 +
13891 +       if (status == IX_CRYPTO_ACC_STATUS_SUCCESS) {
13892 +               /* On success, need to copy hash back into original client buffer */
13893 +               memcpy(q->ixp_hash_dest, q->ixp_hash_src,
13894 +                               (q->ixp_q_data->ixp_auth_alg == CRYPTO_SHA1) ?
13895 +                                       SHA1_HASH_LEN : MD5_HASH_LEN);
13896 +       }
13897 +       else {
13898 +               printk("ixp: hash perform failed status=%d\n", status);
13899 +               q->ixp_q_crp->crp_etype = EINVAL;
13900 +       }
13901 +
13902 +       /* Free internal buffer used for hashing */
13903 +       kfree(IX_MBUF_MDATA(&q->ixp_q_mbuf));
13904 +
13905 +       crypto_done(q->ixp_q_crp);
13906 +       kmem_cache_free(qcache, q);
13907 +}
13908 +
13909 +/*
13910 + * setup a request and perform it
13911 + */
13912 +static void
13913 +ixp_q_process(struct ixp_q *q)
13914 +{
13915 +       IxCryptoAccStatus status;
13916 +       struct ixp_data *ixp = q->ixp_q_data;
13917 +       int auth_off = 0;
13918 +       int auth_len = 0;
13919 +       int crypt_off = 0;
13920 +       int crypt_len = 0;
13921 +       int icv_off = 0;
13922 +       char *crypt_func;
13923 +
13924 +       dprintk("%s(%p)\n", __FUNCTION__, q);
13925 +
13926 +       if (q->ixp_q_ccrd) {
13927 +               if (q->ixp_q_ccrd->crd_flags & CRD_F_IV_EXPLICIT) {
13928 +                       q->ixp_q_iv = q->ixp_q_ccrd->crd_iv;
13929 +               } else {
13930 +                       q->ixp_q_iv = q->ixp_q_iv_data;
13931 +                       crypto_copydata(q->ixp_q_crp->crp_flags, q->ixp_q_crp->crp_buf,
13932 +                                       q->ixp_q_ccrd->crd_inject,
13933 +                                       ixp->ixp_ctx.cipherCtx.cipherInitialVectorLen,
13934 +                                       (caddr_t) q->ixp_q_iv);
13935 +               }
13936 +
13937 +               if (q->ixp_q_acrd) {
13938 +                       auth_off = q->ixp_q_acrd->crd_skip;
13939 +                       auth_len = q->ixp_q_acrd->crd_len;
13940 +                       icv_off  = q->ixp_q_acrd->crd_inject;
13941 +               }
13942 +
13943 +               crypt_off = q->ixp_q_ccrd->crd_skip;
13944 +               crypt_len = q->ixp_q_ccrd->crd_len;
13945 +       } else { /* if (q->ixp_q_acrd) */
13946 +               auth_off = q->ixp_q_acrd->crd_skip;
13947 +               auth_len = q->ixp_q_acrd->crd_len;
13948 +               icv_off  = q->ixp_q_acrd->crd_inject;
13949 +       }
13950 +
13951 +       if (q->ixp_q_crp->crp_flags & CRYPTO_F_SKBUF) {
13952 +               struct sk_buff *skb = (struct sk_buff *) q->ixp_q_crp->crp_buf;
13953 +               if (skb_shinfo(skb)->nr_frags) {
13954 +                       /*
13955 +                        * DAVIDM fix this limitation one day by using
13956 +                        * a buffer pool and chaining;  it is not currently
13957 +                        * needed for user/kernel space acceleration
13958 +                        */
13959 +                       printk("ixp: Cannot handle fragmented skbs yet!\n");
13960 +                       q->ixp_q_crp->crp_etype = ENOENT;
13961 +                       goto done;
13962 +               }
13963 +               IX_MBUF_MLEN(&q->ixp_q_mbuf) =
13964 +                               IX_MBUF_PKT_LEN(&q->ixp_q_mbuf) =  skb->len;
13965 +               IX_MBUF_MDATA(&q->ixp_q_mbuf) = skb->data;
13966 +       } else if (q->ixp_q_crp->crp_flags & CRYPTO_F_IOV) {
13967 +               struct uio *uiop = (struct uio *) q->ixp_q_crp->crp_buf;
13968 +               if (uiop->uio_iovcnt != 1) {
13969 +                       /*
13970 +                        * DAVIDM fix this limitation one day by using
13971 +                        * a buffer pool and chaining;  it is not currently
13972 +                        * needed for user/kernel space acceleration
13973 +                        */
13974 +                       printk("ixp: Cannot handle more than 1 iovec yet!\n");
13975 +                       q->ixp_q_crp->crp_etype = ENOENT;
13976 +                       goto done;
13977 +               }
13978 +               IX_MBUF_MLEN(&q->ixp_q_mbuf) =
13979 +                               IX_MBUF_PKT_LEN(&q->ixp_q_mbuf) = uiop->uio_iov[0].iov_len;
13980 +               IX_MBUF_MDATA(&q->ixp_q_mbuf) = uiop->uio_iov[0].iov_base;
13981 +       } else /* contig buffer */ {
13982 +               IX_MBUF_MLEN(&q->ixp_q_mbuf)  =
13983 +                               IX_MBUF_PKT_LEN(&q->ixp_q_mbuf) = q->ixp_q_crp->crp_ilen;
13984 +               IX_MBUF_MDATA(&q->ixp_q_mbuf) = q->ixp_q_crp->crp_buf;
13985 +       }
13986 +
13987 +       IX_MBUF_PRIV(&q->ixp_q_mbuf) = q;
13988 +
13989 +       if (ixp->ixp_auth_alg == CRYPTO_SHA1 || ixp->ixp_auth_alg == CRYPTO_MD5) {
13990 +               /*
13991 +                * For SHA1 and MD5 hash, need to create an internal buffer that is big
13992 +                * enough to hold the original data + the appropriate padding for the
13993 +                * hash algorithm.
13994 +                */
13995 +               UINT8 *tbuf = NULL;
13996 +
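+               /*
+                * enlarge the mbuf length so there is room for the worst-case
+                * MD5/SHA1 padding after the data: 72 bits cover the pad byte
+                * and 64-bit length field, and the 511 bits of slack always
+                * reach the next 512-bit block boundary.
+                */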
13997 +               IX_MBUF_MLEN(&q->ixp_q_mbuf) = IX_MBUF_PKT_LEN(&q->ixp_q_mbuf) =
13998 +                       ((IX_MBUF_MLEN(&q->ixp_q_mbuf) * 8) + 72 + 511) / 8;
13999 +               tbuf = kmalloc(IX_MBUF_MLEN(&q->ixp_q_mbuf), SLAB_ATOMIC);
14000 +               
14001 +               if (tbuf == NULL) {
14002 +                       printk("ixp: kmalloc(%u, SLAB_ATOMIC) failed\n",
14003 +                                       IX_MBUF_MLEN(&q->ixp_q_mbuf));
14004 +                       q->ixp_q_crp->crp_etype = ENOMEM;
14005 +                       goto done;
14006 +               }
14007 +               memcpy(tbuf, &(IX_MBUF_MDATA(&q->ixp_q_mbuf))[auth_off], auth_len);
14008 +
14009 +               /* Set location in client buffer to copy hash into */
14010 +               q->ixp_hash_dest =
14011 +                       &(IX_MBUF_MDATA(&q->ixp_q_mbuf))[auth_off + auth_len];
14012 +
14013 +               IX_MBUF_MDATA(&q->ixp_q_mbuf) = tbuf;
14014 +
14015 +               /* Set location in internal buffer for where hash starts */
14016 +               q->ixp_hash_src = &(IX_MBUF_MDATA(&q->ixp_q_mbuf))[auth_len];
14017 +
14018 +               crypt_func = "ixCryptoAccHashPerform";
14019 +               status = ixCryptoAccHashPerform(ixp->ixp_ctx.authCtx.authAlgo,
14020 +                               &q->ixp_q_mbuf, ixp_hash_perform_cb, 0, auth_len, auth_len,
14021 +                               &ixp->ixp_hash_key_id);
14022 +       }
14023 +       else {
14024 +               crypt_func = "ixCryptoAccAuthCryptPerform";
14025 +               status = ixCryptoAccAuthCryptPerform(ixp->ixp_ctx_id, &q->ixp_q_mbuf,
14026 +                       NULL, auth_off, auth_len, crypt_off, crypt_len, icv_off,
14027 +                       q->ixp_q_iv);
14028 +       }
14029 +
14030 +       if (IX_CRYPTO_ACC_STATUS_SUCCESS == status)
14031 +               return;
14032 +
14033 +       if (IX_CRYPTO_ACC_STATUS_QUEUE_FULL == status) {
14034 +               q->ixp_q_crp->crp_etype = ENOMEM;
14035 +               goto done;
14036 +       }
14037 +
14038 +       printk("ixp: %s failed %u\n", crypt_func, status);
14039 +       q->ixp_q_crp->crp_etype = EINVAL;
14040 +
14041 +done:
14042 +       crypto_done(q->ixp_q_crp);
14043 +       kmem_cache_free(qcache, q);
14044 +}
14045 +
14046 +
14047 +/*
14048 + * because we cannot process the Q from the Register callback
14049 + * we do it here on a task Q.
14050 + */
14051 +
14052 +static void
14053 +ixp_process_pending(void *arg)
14054 +{
14055 +       struct ixp_data *ixp = arg;
14056 +       struct ixp_q *q = NULL;
14057 +
14058 +       dprintk("%s(%p)\n", __FUNCTION__, arg);
14059 +
14060 +       if (!ixp)
14061 +               return;
14062 +
14063 +       while (!list_empty(&ixp->ixp_q)) {
14064 +               q = list_entry(ixp->ixp_q.next, struct ixp_q, ixp_q_list);
14065 +               list_del(&q->ixp_q_list);
14066 +               ixp_q_process(q);
14067 +       }
14068 +}
14069 +
14070 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
14071 +static void
14072 +ixp_process_pending_wq(struct work_struct *work)
14073 +{
14074 +       struct ixp_data *ixp = container_of(work, struct ixp_data,
14075 +                                                               ixp_pending_work);
14076 +       ixp_process_pending(ixp);
14077 +}
14078 +#endif
14079 +
14080 +/*
14081 + * callback for when context registration is complete
14082 + */
14083 +
14084 +static void
14085 +ixp_register_cb(UINT32 ctx_id, IX_MBUF *bufp, IxCryptoAccStatus status)
14086 +{
14087 +       int i;
14088 +       struct ixp_data *ixp;
14089 +       struct ixp_q *q;
14090 +
14091 +       dprintk("%s(%d, %p, %d)\n", __FUNCTION__, ctx_id, bufp, status);
14092 +
14093 +       /*
14094 +        * free any buffer passed in to this routine
14095 +        */
14096 +       if (bufp) {
14097 +               IX_MBUF_MLEN(bufp) = IX_MBUF_PKT_LEN(bufp) = 0;
14098 +               kfree(IX_MBUF_MDATA(bufp));
14099 +               IX_MBUF_MDATA(bufp) = NULL;
14100 +       }
14101 +
14102 +       for (i = 0; i < ixp_sesnum; i++) {
14103 +               ixp = ixp_sessions[i];
14104 +               if (ixp && ixp->ixp_ctx_id == ctx_id)
14105 +                       break;
14106 +       }
14107 +       if (i >= ixp_sesnum) {
14108 +               printk("ixp: invalid context id %d\n", ctx_id);
14109 +               return;
14110 +       }
14111 +
14112 +       if (IX_CRYPTO_ACC_STATUS_WAIT == status) {
14113 +               /* this is normal to free the first of two buffers */
14114 +               dprintk("ixp: register not finished yet.\n");
14115 +               return;
14116 +       }
14117 +
14118 +       if (IX_CRYPTO_ACC_STATUS_SUCCESS != status) {
14119 +               printk("ixp: register failed 0x%x\n", status);
14120 +               while (!list_empty(&ixp->ixp_q)) {
14121 +                       q = list_entry(ixp->ixp_q.next, struct ixp_q, ixp_q_list);
14122 +                       list_del(&q->ixp_q_list);
14123 +                       q->ixp_q_crp->crp_etype = EINVAL;
14124 +                       crypto_done(q->ixp_q_crp);
14125 +                       kmem_cache_free(qcache, q);
14126 +               }
14127 +               return;
14128 +       }
14129 +
14130 +       /*
14131 +        * we are now registered,  but we cannot start processing the Q here
14132 +        * or we get strange errors with AES (DES/3DES seem to be ok).
14133 +        */
14134 +       ixp->ixp_registered = 1;
14135 +       schedule_work(&ixp->ixp_pending_work);
14136 +}
14137 +
14138 +
14139 +/*
14140 + * callback for when data processing is complete
14141 + */
14142 +
14143 +static void
14144 +ixp_perform_cb(
14145 +       UINT32 ctx_id,
14146 +       IX_MBUF *sbufp,
14147 +       IX_MBUF *dbufp,
14148 +       IxCryptoAccStatus status)
14149 +{
14150 +       struct ixp_q *q;
14151 +
14152 +       dprintk("%s(%d, %p, %p, 0x%x)\n", __FUNCTION__, ctx_id, sbufp,
14153 +                       dbufp, status);
14154 +
14155 +       if (sbufp == NULL) {
14156 +               printk("ixp: NULL sbuf in ixp_perform_cb\n");
14157 +               return;
14158 +       }
14159 +
14160 +       q = IX_MBUF_PRIV(sbufp);
14161 +       if (q == NULL) {
14162 +               printk("ixp: NULL priv in ixp_perform_cb\n");
14163 +               return;
14164 +       }
14165 +
14166 +       if (status != IX_CRYPTO_ACC_STATUS_SUCCESS) {
14167 +               printk("ixp: perform failed status=%d\n", status);
14168 +               q->ixp_q_crp->crp_etype = EINVAL;
14169 +       }
14170 +
14171 +       crypto_done(q->ixp_q_crp);
14172 +       kmem_cache_free(qcache, q);
14173 +}
14174 +
14175 +
14176 +/*
14177 + * registration is not callable at IRQ time,  so we defer
14178 + * registration is not callable at IRQ time,  so we defer
14179 + * to a task queue;  this routine completes the registration for us
14180 + * when the task queue runs
14181 + * Unfortunately this means we cannot tell OCF that the driver is blocked;
14182 + * we do that on the next request.
14183 + */
14184 +
14185 +static void
14186 +ixp_registration(void *arg)
14187 +{
14188 +       struct ixp_data *ixp = arg;
14189 +       struct ixp_q *q = NULL;
14190 +       IX_MBUF *pri = NULL, *sec = NULL;
14191 +       int status = IX_CRYPTO_ACC_STATUS_SUCCESS;
14192 +
14193 +       if (!ixp) {
14194 +               printk("ixp: ixp_registration with no arg\n");
14195 +               return;
14196 +       }
14197 +
14198 +       if (ixp->ixp_ctx_id != -1) {
14199 +               ixCryptoAccCtxUnregister(ixp->ixp_ctx_id);
14200 +               ixp->ixp_ctx_id = -1;
14201 +       }
14202 +
14203 +       if (list_empty(&ixp->ixp_q)) {
14204 +               printk("ixp: ixp_registration with no Q\n");
14205 +               return;
14206 +       }
14207 +
14208 +       /*
14209 +        * set up the primary and secondary buffers
14210 +        */
14211 +       q = list_entry(ixp->ixp_q.next, struct ixp_q, ixp_q_list);
14212 +       if (q->ixp_q_acrd) {
14213 +               pri = &ixp->ixp_pri_mbuf;
14214 +               sec = &ixp->ixp_sec_mbuf;
14215 +               IX_MBUF_MLEN(pri)  = IX_MBUF_PKT_LEN(pri) = 128;
14216 +               IX_MBUF_MDATA(pri) = (unsigned char *) kmalloc(128, SLAB_ATOMIC);
14217 +               IX_MBUF_MLEN(sec)  = IX_MBUF_PKT_LEN(sec) = 128;
14218 +               IX_MBUF_MDATA(sec) = (unsigned char *) kmalloc(128, SLAB_ATOMIC);
14219 +       }
14220 +
14221 +       /* Only need to register if a crypt op or HMAC op */
14222 +       if (!(ixp->ixp_auth_alg == CRYPTO_SHA1 ||
14223 +                               ixp->ixp_auth_alg == CRYPTO_MD5)) {
14224 +               status = ixCryptoAccCtxRegister(
14225 +                                       &ixp->ixp_ctx,
14226 +                                       pri, sec,
14227 +                                       ixp_register_cb,
14228 +                                       ixp_perform_cb,
14229 +                                       &ixp->ixp_ctx_id);
14230 +       }
14231 +       else {
14232 +               /* Otherwise we start processing the pending Q */
14233 +               schedule_work(&ixp->ixp_pending_work);
14234 +       }
14235 +
14236 +       if (IX_CRYPTO_ACC_STATUS_SUCCESS == status)
14237 +               return;
14238 +
14239 +       if (IX_CRYPTO_ACC_STATUS_EXCEED_MAX_TUNNELS == status) {
14240 +               printk("ixp: ixCryptoAccCtxRegister failed (out of tunnels)\n");
14241 +               ixp_blocked = 1;
14242 +               /* perhaps we should return EAGAIN on queued ops? */
14243 +               return;
14244 +       }
14245 +
14246 +       printk("ixp: ixCryptoAccCtxRegister failed %d\n", status);
14247 +       ixp->ixp_ctx_id = -1;
14248 +
14249 +       /*
14250 +        * everything waiting is toasted
14251 +        */
14252 +       while (!list_empty(&ixp->ixp_q)) {
14253 +               q = list_entry(ixp->ixp_q.next, struct ixp_q, ixp_q_list);
14254 +               list_del(&q->ixp_q_list);
14255 +               q->ixp_q_crp->crp_etype = ENOENT;
14256 +               crypto_done(q->ixp_q_crp);
14257 +               kmem_cache_free(qcache, q);
14258 +       }
14259 +}
14260 +
14261 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
14262 +static void
14263 +ixp_registration_wq(struct work_struct *work)
14264 +{
14265 +       struct ixp_data *ixp = container_of(work, struct ixp_data,
14266 +                                                               ixp_registration_work);
14267 +       ixp_registration(ixp);
14268 +}
14269 +#endif
14270 +
14271 +/*
14272 + * Process a request.
14273 + */
14274 +static int
14275 +ixp_process(device_t dev, struct cryptop *crp, int hint)
14276 +{
14277 +       struct ixp_data *ixp;
14278 +       unsigned int lid;
14279 +       struct ixp_q *q = NULL;
14280 +       int status;
14281 +
14282 +       dprintk("%s()\n", __FUNCTION__);
14283 +
14284 +       /* Sanity check */
14285 +       if (crp == NULL) {
14286 +               dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
14287 +               return EINVAL;
14288 +       }
14289 +
14290 +       crp->crp_etype = 0;
14291 +
14292 +       if (ixp_blocked)
14293 +               return ERESTART;
14294 +
14295 +       if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
14296 +               dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
14297 +               crp->crp_etype = EINVAL;
14298 +               goto done;
14299 +       }
14300 +
14301 +       /*
14302 +        * find the session we are using
14303 +        */
14304 +
14305 +       lid = crp->crp_sid & 0xffffffff;
14306 +       if (lid >= ixp_sesnum || lid == 0 || ixp_sessions == NULL ||
14307 +                       ixp_sessions[lid] == NULL) {
14308 +               crp->crp_etype = ENOENT;
14309 +               dprintk("%s,%d: ENOENT\n", __FILE__, __LINE__);
14310 +               goto done;
14311 +       }
14312 +       ixp = ixp_sessions[lid];
14313 +
14314 +       /*
14315 +        * set up a new request ready for queuing
14316 +        */
14317 +       q = kmem_cache_alloc(qcache, SLAB_ATOMIC);
14318 +       if (q == NULL) {
14319 +               dprintk("%s,%d: ENOMEM\n", __FILE__, __LINE__);
14320 +               crp->crp_etype = ENOMEM;
14321 +               goto done;
14322 +       }
14323 +       /*
14324 +        * save some cycles by only zeroing the important bits
14325 +        */
14326 +       memset(&q->ixp_q_mbuf, 0, sizeof(q->ixp_q_mbuf));
14327 +       q->ixp_q_ccrd = NULL;
14328 +       q->ixp_q_acrd = NULL;
14329 +       q->ixp_q_crp = crp;
14330 +       q->ixp_q_data = ixp;
14331 +
14332 +       /*
14333 +        * point the cipher and auth descriptors appropriately
14334 +        * check that we have something to do
14335 +        */
14336 +       if (crp->crp_desc->crd_alg == ixp->ixp_cipher_alg)
14337 +               q->ixp_q_ccrd = crp->crp_desc;
14338 +       else if (crp->crp_desc->crd_alg == ixp->ixp_auth_alg)
14339 +               q->ixp_q_acrd = crp->crp_desc;
14340 +       else {
14341 +               crp->crp_etype = ENOENT;
14342 +               dprintk("%s,%d: bad desc match: ENOENT\n", __FILE__, __LINE__);
14343 +               goto done;
14344 +       }
14345 +       if (crp->crp_desc->crd_next) {
14346 +               if (crp->crp_desc->crd_next->crd_alg == ixp->ixp_cipher_alg)
14347 +                       q->ixp_q_ccrd = crp->crp_desc->crd_next;
14348 +               else if (crp->crp_desc->crd_next->crd_alg == ixp->ixp_auth_alg)
14349 +                       q->ixp_q_acrd = crp->crp_desc->crd_next;
14350 +               else {
14351 +                       crp->crp_etype = ENOENT;
14352 +                       dprintk("%s,%d: bad desc match: ENOENT\n", __FILE__, __LINE__);
14353 +                       goto done;
14354 +               }
14355 +       }
14356 +
14357 +       /*
14358 +        * If there is a direction change for this context then we mark it as
14359 +        * unregistered and re-register it for the new direction.  This is not
14360 +        * a very expensive operation and currently only tends to happen when
14361 +        * user-space applications are doing benchmarks.
14362 +        *
14363 +        * DM - we should be checking for pending requests before unregistering.
14364 +        */
14365 +       if (q->ixp_q_ccrd && ixp->ixp_registered &&
14366 +                       ixp->ixp_crd_flags != (q->ixp_q_ccrd->crd_flags & CRD_F_ENCRYPT)) {
14367 +               dprintk("%s - detected direction change on session\n", __FUNCTION__);
14368 +               ixp->ixp_registered = 0;
14369 +       }
14370 +
14371 +       /*
14372 +        * if we are registered,  call straight into the perform code
14373 +        */
14374 +       if (ixp->ixp_registered) {
14375 +               ixp_q_process(q);
14376 +               return 0;
14377 +       }
14378 +
14379 +       /*
14380 +        * the only parts of the context not set in newsession are the
14381 +        * direction-dependent parts
14382 +        */
14383 +       if (q->ixp_q_ccrd) {
14384 +               ixp->ixp_crd_flags = (q->ixp_q_ccrd->crd_flags & CRD_F_ENCRYPT);
14385 +               if (q->ixp_q_ccrd->crd_flags & CRD_F_ENCRYPT) {
14386 +                       ixp->ixp_ctx.operation = q->ixp_q_acrd ?
14387 +                                       IX_CRYPTO_ACC_OP_ENCRYPT_AUTH : IX_CRYPTO_ACC_OP_ENCRYPT;
14388 +               } else {
14389 +                       ixp->ixp_ctx.operation = q->ixp_q_acrd ?
14390 +                                       IX_CRYPTO_ACC_OP_AUTH_DECRYPT : IX_CRYPTO_ACC_OP_DECRYPT;
14391 +               }
14392 +       } else {
14393 +               /* q->ixp_q_acrd must be set if we are here */
14394 +               ixp->ixp_ctx.operation = IX_CRYPTO_ACC_OP_AUTH_CALC;
14395 +       }
14396 +
14397 +       status = list_empty(&ixp->ixp_q);
14398 +       list_add_tail(&q->ixp_q_list, &ixp->ixp_q);
14399 +       if (status)
14400 +               schedule_work(&ixp->ixp_registration_work);
14401 +       return 0;
14402 +
14403 +done:
14404 +       if (q)
14405 +               kmem_cache_free(qcache, q);
14406 +       crypto_done(crp);
14407 +       return 0;
14408 +}
14409 +
14410 +
14411 +#ifdef __ixp46X
14412 +/*
14413 + * key processing support for the ixp465
14414 + */
14415 +
14416 +
14417 +/*
14418 + * copy a BN (LE) into a buffer (BE) and fill out the op appropriately;
14419 + * assume the buffer is zeroed and only copy the bits that are significant
14420 + */
14421 +
14422 +static int
14423 +ixp_copy_ibuf(struct crparam *p, IxCryptoAccPkeEauOperand *op, UINT32 *buf)
14424 +{
14425 +       unsigned char *src = (unsigned char *) p->crp_p;
14426 +       unsigned char *dst;
14427 +       int len, bits = p->crp_nbits;
14428 +
14429 +       dprintk("%s()\n", __FUNCTION__);
14430 +
14431 +       if (bits > MAX_IOP_SIZE * sizeof(UINT32) * 8) {
14432 +               dprintk("%s - ibuf too big (%d > %d)\n", __FUNCTION__,
14433 +                               bits, MAX_IOP_SIZE * sizeof(UINT32) * 8);
14434 +               return -1;
14435 +       }
14436 +
14437 +       len = (bits + 31) / 32; /* the number of UINT32s needed */
14438 +
14439 +       dst = (unsigned char *) &buf[len];
14440 +       dst--;
14441 +
14442 +       while (bits > 0) {
14443 +               *dst-- = *src++;
14444 +               bits -= 8;
14445 +       }
14446 +
14447 +#if 0 /* no need to zero remaining bits as it is done during request alloc */
14448 +       while (dst > (unsigned char *) buf)
14449 +               *dst-- = '\0';
14450 +#endif
14451 +
14452 +       op->pData = buf;
14453 +       op->dataLen = len;
14454 +       return 0;
14455 +}
14456 +
14457 +/*
14458 + * copy out the result, being as forgiving as we can about small output buffers
14459 + */
14460 +
14461 +static int
14462 +ixp_copy_obuf(struct crparam *p, IxCryptoAccPkeEauOpResult *op, UINT32 *buf)
14463 +{
14464 +       unsigned char *dst = (unsigned char *) p->crp_p;
14465 +       unsigned char *src = (unsigned char *) buf;
14466 +       int len, z, bits = p->crp_nbits;
14467 +
14468 +       dprintk("%s()\n", __FUNCTION__);
14469 +
14470 +       len = op->dataLen * sizeof(UINT32);
14471 +
14472 +       /* skip leading zeroes to be small buffer friendly */
14473 +       z = 0;
14474 +       while (z < len && src[z] == '\0')
14475 +               z++;
14476 +
14477 +       src += len;
14478 +       src--;
14479 +       len -= z;
14480 +
14481 +       while (len > 0 && bits > 0) {
14482 +               *dst++ = *src--;
14483 +               len--;
14484 +               bits -= 8;
14485 +       }
14486 +
14487 +       while (bits > 0) {
14488 +               *dst++ = '\0';
14489 +               bits -= 8;
14490 +       }
14491 +
14492 +       if (len > 0) {
14493 +               dprintk("%s - obuf is %d (z=%d, ob=%d) bytes too small\n",
14494 +                               __FUNCTION__, len, z, p->crp_nbits / 8);
14495 +               return -1;
14496 +       }
14497 +
14498 +       return 0;
14499 +}
14500 +
14501 +
14502 +/*
14503 + * the parameter offsets for exp_mod
14504 + */
14505 +
14506 +#define IXP_PARAM_BASE 0
14507 +#define IXP_PARAM_EXP  1
14508 +#define IXP_PARAM_MOD  2
14509 +#define IXP_PARAM_RES  3
14510 +
14511 +/*
14512 + * key processing complete callback; it is also used to start processing
14513 + * by passing a NULL for pResult
14514 + */
14515 +
14516 +static void
14517 +ixp_kperform_cb(
14518 +       IxCryptoAccPkeEauOperation operation,
14519 +       IxCryptoAccPkeEauOpResult *pResult,
14520 +       BOOL carryOrBorrow,
14521 +       IxCryptoAccStatus status)
14522 +{
14523 +       struct ixp_pkq *q, *tmp;
14524 +       unsigned long flags;
14525 +
14526 +       dprintk("%s(0x%x, %p, %d, 0x%x)\n", __FUNCTION__, operation, pResult,
14527 +                       carryOrBorrow, status);
14528 +
14529 +       /* handle a completed request */
14530 +       if (pResult) {
14531 +               if (ixp_pk_cur && &ixp_pk_cur->pkq_result == pResult) {
14532 +                       q = ixp_pk_cur;
14533 +                       if (status != IX_CRYPTO_ACC_STATUS_SUCCESS) {
14534 +                               dprintk("%s() - op failed 0x%x\n", __FUNCTION__, status);
14535 +                               q->pkq_krp->krp_status = ERANGE; /* could do better */
14536 +                       } else {
14537 +                               /* copy out the result */
14538 +                               if (ixp_copy_obuf(&q->pkq_krp->krp_param[IXP_PARAM_RES],
14539 +                                               &q->pkq_result, q->pkq_obuf))
14540 +                                       q->pkq_krp->krp_status = ERANGE;
14541 +                       }
14542 +                       crypto_kdone(q->pkq_krp);
14543 +                       kfree(q);
14544 +                       ixp_pk_cur = NULL;
14545 +               } else
14546 +                       printk("%s - callback with invalid result pointer\n", __FUNCTION__);
14547 +       }
14548 +
14549 +       spin_lock_irqsave(&ixp_pkq_lock, flags);
14550 +       if (ixp_pk_cur || list_empty(&ixp_pkq)) {
14551 +               spin_unlock_irqrestore(&ixp_pkq_lock, flags);
14552 +               return;
14553 +       }
14554 +
14555 +       list_for_each_entry_safe(q, tmp, &ixp_pkq, pkq_list) {
14556 +
14557 +               list_del(&q->pkq_list);
14558 +               ixp_pk_cur = q;
14559 +
14560 +               spin_unlock_irqrestore(&ixp_pkq_lock, flags);
14561 +
14562 +               status = ixCryptoAccPkeEauPerform(
14563 +                               IX_CRYPTO_ACC_OP_EAU_MOD_EXP,
14564 +                               &q->pkq_op,
14565 +                               ixp_kperform_cb,
14566 +                               &q->pkq_result);
14567 +       
14568 +               if (status == IX_CRYPTO_ACC_STATUS_SUCCESS) {
14569 +                       dprintk("%s() - ixCryptoAccPkeEauPerform SUCCESS\n", __FUNCTION__);
14570 +                       return; /* the completion callback will re-enter this routine */
14571 +               } else if (status == IX_CRYPTO_ACC_STATUS_RETRY) {
14572 +                       printk("%s() - ixCryptoAccPkeEauPerform RETRY\n", __FUNCTION__);
14573 +               } else {
14574 +                       printk("%s() - ixCryptoAccPkeEauPerform failed %d\n",
14575 +                                       __FUNCTION__, status);
14576 +               }
14577 +               q->pkq_krp->krp_status = ERANGE; /* could do better */
14578 +               crypto_kdone(q->pkq_krp);
14579 +               kfree(q);
14580 +               spin_lock_irqsave(&ixp_pkq_lock, flags);
14581 +       }
14582 +       spin_unlock_irqrestore(&ixp_pkq_lock, flags);
14583 +}
14584 +
14585 +
14586 +static int
14587 +ixp_kprocess(device_t dev, struct cryptkop *krp, int hint)
14588 +{
14589 +       struct ixp_pkq *q;
14590 +       int rc = 0;
14591 +       unsigned long flags;
14592 +
14593 +       dprintk("%s l1=%d l2=%d l3=%d l4=%d\n", __FUNCTION__,
14594 +                       krp->krp_param[IXP_PARAM_BASE].crp_nbits,
14595 +                       krp->krp_param[IXP_PARAM_EXP].crp_nbits,
14596 +                       krp->krp_param[IXP_PARAM_MOD].crp_nbits,
14597 +                       krp->krp_param[IXP_PARAM_RES].crp_nbits);
14598 +
14599 +
14600 +       if (krp->krp_op != CRK_MOD_EXP) {
14601 +               krp->krp_status = EOPNOTSUPP;
14602 +               goto err;
14603 +       }
14604 +
14605 +       q = (struct ixp_pkq *) kmalloc(sizeof(*q), GFP_KERNEL);
14606 +       if (q == NULL) {
14607 +               krp->krp_status = ENOMEM;
14608 +               goto err;
14609 +       }
14610 +
14611 +       /*
14612 +        * The PKE engine does not appear to zero the output buffer
14613 +        * appropriately, so we need to do it all here.
14614 +        */
14615 +       memset(q, 0, sizeof(*q));
14616 +
14617 +       q->pkq_krp = krp;
14618 +       INIT_LIST_HEAD(&q->pkq_list);
14619 +
14620 +       if (ixp_copy_ibuf(&krp->krp_param[IXP_PARAM_BASE], &q->pkq_op.modExpOpr.M,
14621 +                       q->pkq_ibuf0))
14622 +               rc = 1;
14623 +       if (!rc && ixp_copy_ibuf(&krp->krp_param[IXP_PARAM_EXP],
14624 +                               &q->pkq_op.modExpOpr.e, q->pkq_ibuf1))
14625 +               rc = 2;
14626 +       if (!rc && ixp_copy_ibuf(&krp->krp_param[IXP_PARAM_MOD],
14627 +                               &q->pkq_op.modExpOpr.N, q->pkq_ibuf2))
14628 +               rc = 3;
14629 +
14630 +       if (rc) {
14631 +               kfree(q);
14632 +               krp->krp_status = ERANGE;
14633 +               goto err;
14634 +       }
14635 +
14636 +       q->pkq_result.pData           = q->pkq_obuf;
14637 +       q->pkq_result.dataLen         =
14638 +                       (krp->krp_param[IXP_PARAM_RES].crp_nbits + 31) / 32;
14639 +
14640 +       spin_lock_irqsave(&ixp_pkq_lock, flags);
14641 +       list_add_tail(&q->pkq_list, &ixp_pkq);
14642 +       spin_unlock_irqrestore(&ixp_pkq_lock, flags);
14643 +
14644 +       if (!ixp_pk_cur)
14645 +               ixp_kperform_cb(0, NULL, 0, 0);
14646 +       return (0);
14647 +
14648 +err:
14649 +       crypto_kdone(krp);
14650 +       return (0);
14651 +}
14652 +
14653 +
14654 +
14655 +#ifdef CONFIG_OCF_RANDOMHARVEST
14656 +/*
14657 + * We run the random number generator output through SHA so that it
14658 + * is FIPS compliant.
14659 + */
14660 +
14661 +static volatile int sha_done = 0;
14662 +static unsigned char sha_digest[20];
14663 +
14664 +static void
14665 +ixp_hash_cb(UINT8 *digest, IxCryptoAccStatus status)
14666 +{
14667 +       dprintk("%s(%p, %d)\n", __FUNCTION__, digest, status);
14668 +       if (sha_digest != digest)
14669 +               printk("digest error\n");
14670 +       if (IX_CRYPTO_ACC_STATUS_SUCCESS == status)
14671 +               sha_done = 1;
14672 +       else
14673 +               sha_done = -status;
14674 +}
14675 +
14676 +static int
14677 +ixp_read_random(void *arg, u_int32_t *buf, int maxwords)
14678 +{
14679 +       IxCryptoAccStatus status;
14680 +       int i, n, rc;
14681 +
14682 +       dprintk("%s(%p, %d)\n", __FUNCTION__, buf, maxwords);
14683 +       memset(buf, 0, maxwords * sizeof(*buf));
14684 +       status = ixCryptoAccPkePseudoRandomNumberGet(maxwords, buf);
14685 +       if (status != IX_CRYPTO_ACC_STATUS_SUCCESS) {
14686 +               dprintk("%s: ixCryptoAccPkePseudoRandomNumberGet failed %d\n",
14687 +                               __FUNCTION__, status);
14688 +               return 0;
14689 +       }
14690 +
14691 +       /*
14692 +        * run the random data through SHA to make it look more random
14693 +        */
14694 +
14695 +       n = sizeof(sha_digest); /* process a digest's worth of bytes at a time */
14696 +
14697 +       rc = 0;
14698 +       for (i = 0; i < maxwords; i += n / sizeof(*buf)) {
14699 +               if ((maxwords - i) * sizeof(*buf) < n)
14700 +                       n = (maxwords - i) * sizeof(*buf);
14701 +               sha_done = 0;
14702 +               status = ixCryptoAccPkeHashPerform(IX_CRYPTO_ACC_AUTH_SHA1,
14703 +                               (UINT8 *) &buf[i], n, ixp_hash_cb, sha_digest);
14704 +               if (status != IX_CRYPTO_ACC_STATUS_SUCCESS) {
14705 +                       dprintk("ixCryptoAccPkeHashPerform failed %d\n", status);
14706 +                       return -EIO;
14707 +               }
14708 +               while (!sha_done)
14709 +                       schedule();
14710 +               if (sha_done < 0) {
14711 +                       dprintk("ixCryptoAccPkeHashPerform failed CB %d\n", -sha_done);
14712 +                       return 0;
14713 +               }
14714 +               memcpy(&buf[i], sha_digest, n);
14715 +               rc += n / sizeof(*buf);
14716 +       }
14717 +
14718 +       return rc;
14719 +}
14720 +#endif /* CONFIG_OCF_RANDOMHARVEST */
14721 +
14722 +#endif /* __ixp46X */
14723 +
14724 +
14725 +
14726 +/*
14727 + * our driver startup and shutdown routines
14728 + */
14729 +
14730 +static int
14731 +ixp_init(void)
14732 +{
14733 +       dprintk("%s(%p)\n", __FUNCTION__, ixp_init);
14734 +
14735 +       if (ixp_init_crypto && ixCryptoAccInit() != IX_CRYPTO_ACC_STATUS_SUCCESS)
14736 +               printk("ixCryptoAccInit failed, assuming already initialised!\n");
14737 +
14738 +       qcache = kmem_cache_create("ixp4xx_q", sizeof(struct ixp_q), 0,
14739 +                               SLAB_HWCACHE_ALIGN, NULL
14740 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
14741 +                               , NULL
14742 +#endif
14743 +                                 );
14744 +       if (!qcache) {
14745 +               printk("failed to create Qcache\n");
14746 +               return -ENOENT;
14747 +       }
14748 +
14749 +       memset(&ixpdev, 0, sizeof(ixpdev));
14750 +       softc_device_init(&ixpdev, "ixp4xx", 0, ixp_methods);
14751 +
14752 +       ixp_id = crypto_get_driverid(softc_get_device(&ixpdev),
14753 +                               CRYPTOCAP_F_HARDWARE);
14754 +       if (ixp_id < 0)
14755 +               panic("IXP/OCF crypto device cannot initialize!");
14756 +
14757 +#define        REGISTER(alg) \
14758 +       crypto_register(ixp_id,alg,0,0)
14759 +
14760 +       REGISTER(CRYPTO_DES_CBC);
14761 +       REGISTER(CRYPTO_3DES_CBC);
14762 +       REGISTER(CRYPTO_RIJNDAEL128_CBC);
14763 +#ifdef CONFIG_OCF_IXP4XX_SHA1_MD5
14764 +       REGISTER(CRYPTO_MD5);
14765 +       REGISTER(CRYPTO_SHA1);
14766 +#endif
14767 +       REGISTER(CRYPTO_MD5_HMAC);
14768 +       REGISTER(CRYPTO_SHA1_HMAC);
14769 +#undef REGISTER
14770 +
14771 +#ifdef __ixp46X
14772 +       spin_lock_init(&ixp_pkq_lock);
14773 +       /*
14774 +        * we do not enable the go-fast options here as they can potentially
14775 +        * allow timing-based attacks
14776 +        *
14777 +        * http://www.openssl.org/news/secadv_20030219.txt
14778 +        */
14779 +       ixCryptoAccPkeEauExpConfig(0, 0);
14780 +       crypto_kregister(ixp_id, CRK_MOD_EXP, 0);
14781 +#ifdef CONFIG_OCF_RANDOMHARVEST
14782 +       crypto_rregister(ixp_id, ixp_read_random, NULL);
14783 +#endif
14784 +#endif
14785 +
14786 +       return 0;
14787 +}
14788 +
14789 +static void
14790 +ixp_exit(void)
14791 +{
14792 +       dprintk("%s()\n", __FUNCTION__);
14793 +       crypto_unregister_all(ixp_id);
14794 +       ixp_id = -1;
14795 +       kmem_cache_destroy(qcache);
14796 +       qcache = NULL;
14797 +}
14798 +
14799 +module_init(ixp_init);
14800 +module_exit(ixp_exit);
14801 +
14802 +MODULE_LICENSE("Dual BSD/GPL");
14803 +MODULE_AUTHOR("David McCullough <dmccullough@cyberguard.com>");
14804 +MODULE_DESCRIPTION("ixp (OCF module for IXP4xx crypto)");
14805 --- /dev/null   2007-07-04 13:54:27.000000000 +1000
14806 +++ linux/crypto/ocf/cryptodev.c        2007-12-14 12:35:04.000000000 +1000
14807 @@ -0,0 +1,1048 @@
14808 +/*     $OpenBSD: cryptodev.c,v 1.52 2002/06/19 07:22:46 deraadt Exp $  */
14809 +
14810 +/*-
14811 + * Linux port done by David McCullough <david_mccullough@securecomputing.com>
14812 + * Copyright (C) 2006-2007 David McCullough
14813 + * Copyright (C) 2004-2005 Intel Corporation.
14814 + * The license and original author are listed below.
14815 + *
14816 + * Copyright (c) 2001 Theo de Raadt
14817 + * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
14818 + *
14819 + * Redistribution and use in source and binary forms, with or without
14820 + * modification, are permitted provided that the following conditions
14821 + * are met:
14822 + *
14823 + * 1. Redistributions of source code must retain the above copyright
14824 + *   notice, this list of conditions and the following disclaimer.
14825 + * 2. Redistributions in binary form must reproduce the above copyright
14826 + *   notice, this list of conditions and the following disclaimer in the
14827 + *   documentation and/or other materials provided with the distribution.
14828 + * 3. The name of the author may not be used to endorse or promote products
14829 + *   derived from this software without specific prior written permission.
14830 + *
14831 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
14832 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
14833 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
14834 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
14835 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
14836 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
14837 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
14838 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
14839 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
14840 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
14841 + *
14842 + * Effort sponsored in part by the Defense Advanced Research Projects
14843 + * Agency (DARPA) and Air Force Research Laboratory, Air Force
14844 + * Materiel Command, USAF, under agreement number F30602-01-2-0537.
14845 + *
14846 +__FBSDID("$FreeBSD: src/sys/opencrypto/cryptodev.c,v 1.34 2007/05/09 19:37:02 gnn Exp $");
14847 + */
14848 +
14849 +#ifndef AUTOCONF_INCLUDED
14850 +#include <linux/config.h>
14851 +#endif
14852 +#include <linux/types.h>
14853 +#include <linux/time.h>
14854 +#include <linux/delay.h>
14855 +#include <linux/list.h>
14856 +#include <linux/init.h>
14857 +#include <linux/sched.h>
14858 +#include <linux/unistd.h>
14859 +#include <linux/module.h>
14860 +#include <linux/wait.h>
14861 +#include <linux/slab.h>
14862 +#include <linux/fs.h>
14863 +#include <linux/dcache.h>
14864 +#include <linux/file.h>
14865 +#include <linux/mount.h>
14866 +#include <linux/miscdevice.h>
14867 +#include <linux/version.h>
14868 +#include <asm/uaccess.h>
14869 +
14870 +#include <cryptodev.h>
14871 +#include <uio.h>
14872 +
14873 +extern asmlinkage long sys_dup(unsigned int fildes);
14874 +
14875 +#define debug cryptodev_debug
14876 +int cryptodev_debug = 0;
14877 +module_param(cryptodev_debug, int, 0644);
14878 +MODULE_PARM_DESC(cryptodev_debug, "Enable cryptodev debug");
14879 +
14880 +struct csession_info {
14881 +       u_int16_t       blocksize;
14882 +       u_int16_t       minkey, maxkey;
14883 +
14884 +       u_int16_t       keysize;
14885 +       /* u_int16_t    hashsize;  */
14886 +       u_int16_t       authsize;
14887 +       /* u_int16_t    ctxsize; */
14888 +};
14889 +
14890 +struct csession {
14891 +       struct list_head        list;
14892 +       u_int64_t       sid;
14893 +       u_int32_t       ses;
14894 +
14895 +       wait_queue_head_t waitq;
14896 +
14897 +       u_int32_t       cipher;
14898 +
14899 +       u_int32_t       mac;
14900 +
14901 +       caddr_t         key;
14902 +       int             keylen;
14903 +       u_char          tmp_iv[EALG_MAX_BLOCK_LEN];
14904 +
14905 +       caddr_t         mackey;
14906 +       int             mackeylen;
14907 +
14908 +       struct csession_info info;
14909 +
14910 +       struct iovec    iovec;
14911 +       struct uio      uio;
14912 +       int             error;
14913 +};
14914 +
14915 +struct fcrypt {
14916 +       struct list_head        csessions;
14917 +       int             sesn;
14918 +};
14919 +
14920 +static struct csession *csefind(struct fcrypt *, u_int);
14921 +static int csedelete(struct fcrypt *, struct csession *);
14922 +static struct csession *cseadd(struct fcrypt *, struct csession *);
14923 +static struct csession *csecreate(struct fcrypt *, u_int64_t,
14924 +               struct cryptoini *crie, struct cryptoini *cria, struct csession_info *);
14925 +static int csefree(struct csession *);
14926 +
14927 +static int cryptodev_op(struct csession *, struct crypt_op *);
14928 +static int cryptodev_key(struct crypt_kop *);
14929 +static int cryptodev_find(struct crypt_find_op *);
14930 +
14931 +static int cryptodev_cb(void *);
14932 +static int cryptodev_open(struct inode *inode, struct file *filp);
14933 +
14934 +/*
14935 + * Check a crypto identifier to see if it requested
14936 + * a valid crid and it's capabilities match.
14937 + * a valid crid and its capabilities match.
14938 +static int
14939 +checkcrid(int crid)
14940 +{
14941 +       int hid = crid & ~(CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_HARDWARE);
14942 +       int typ = crid & (CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_HARDWARE);
14943 +       int caps = 0;
14944 +       
14945 +       /* if the user hasn't selected a driver, then just call newsession */
14946 +       if (hid == 0 && typ != 0)
14947 +               return 0;
14948 +
14949 +       caps = crypto_getcaps(hid);
14950 +
14951 +       /* didn't find anything with capabilities */
14952 +       if (caps == 0) {
14953 +               dprintk("%s: hid=%x typ=%x not matched\n", __FUNCTION__, hid, typ);
14954 +               return EINVAL;
14955 +       }
14956 +       
14957 +       /* the user didn't specify SW or HW, so the driver is ok */
14958 +       if (typ == 0)
14959 +               return 0;
14960 +
14961 +       /* if the type specified didn't match */
14962 +       if (typ != (caps & (CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_HARDWARE))) {
14963 +               dprintk("%s: hid=%x typ=%x caps=%x not matched\n", __FUNCTION__,
14964 +                               hid, typ, caps);
14965 +               return EINVAL;
14966 +       }
14967 +
14968 +       return 0;
14969 +}
14970 +
14971 +static int
14972 +cryptodev_op(struct csession *cse, struct crypt_op *cop)
14973 +{
14974 +       struct cryptop *crp = NULL;
14975 +       struct cryptodesc *crde = NULL, *crda = NULL;
14976 +       int error = 0;
14977 +
14978 +       dprintk("%s()\n", __FUNCTION__);
14979 +       if (cop->len > CRYPTO_MAX_DATA_LEN) {
14980 +               dprintk("%s: %d > %d\n", __FUNCTION__, cop->len, CRYPTO_MAX_DATA_LEN);
14981 +               return (E2BIG);
14982 +       }
14983 +
14984 +       if (cse->info.blocksize && (cop->len % cse->info.blocksize) != 0) {
14985 +               dprintk("%s: blocksize=%d len=%d\n", __FUNCTION__, cse->info.blocksize,
14986 +                               cop->len);
14987 +               return (EINVAL);
14988 +       }
14989 +
14990 +       cse->uio.uio_iov = &cse->iovec;
14991 +       cse->uio.uio_iovcnt = 1;
14992 +       cse->uio.uio_offset = 0;
14993 +#if 0
14994 +       cse->uio.uio_resid = cop->len;
14995 +       cse->uio.uio_segflg = UIO_SYSSPACE;
14996 +       cse->uio.uio_rw = UIO_WRITE;
14997 +       cse->uio.uio_td = td;
14998 +#endif
14999 +       cse->uio.uio_iov[0].iov_len = cop->len;
15000 +       if (cse->info.authsize)
15001 +               cse->uio.uio_iov[0].iov_len += cse->info.authsize;
15002 +       cse->uio.uio_iov[0].iov_base = kmalloc(cse->uio.uio_iov[0].iov_len,
15003 +                       GFP_KERNEL);
15004 +
15005 +       if (cse->uio.uio_iov[0].iov_base == NULL) {
15006 +               dprintk("%s: iov_base kmalloc(%d) failed\n", __FUNCTION__,
15007 +                               cse->uio.uio_iov[0].iov_len);
15008 +               return (ENOMEM);
15009 +       }
15010 +
15011 +       crp = crypto_getreq((cse->info.blocksize != 0) + (cse->info.authsize != 0));
15012 +       if (crp == NULL) {
15013 +               dprintk("%s: ENOMEM\n", __FUNCTION__);
15014 +               error = ENOMEM;
15015 +               goto bail;
15016 +       }
15017 +
15018 +       if (cse->info.authsize) {
15019 +               crda = crp->crp_desc;
15020 +               if (cse->info.blocksize)
15021 +                       crde = crda->crd_next;
15022 +       } else {
15023 +               if (cse->info.blocksize)
15024 +                       crde = crp->crp_desc;
15025 +               else {
15026 +                       dprintk("%s: bad request\n", __FUNCTION__);
15027 +                       error = EINVAL;
15028 +                       goto bail;
15029 +               }
15030 +       }
15031 +
15032 +       if ((error = copy_from_user(cse->uio.uio_iov[0].iov_base, cop->src,
15033 +                                       cop->len))) {
15034 +               dprintk("%s: bad copy\n", __FUNCTION__);
15035 +               goto bail;
15036 +       }
15037 +
15038 +       if (crda) {
15039 +               crda->crd_skip = 0;
15040 +               crda->crd_len = cop->len;
15041 +               crda->crd_inject = cop->len;
15042 +
15043 +               crda->crd_alg = cse->mac;
15044 +               crda->crd_key = cse->mackey;
15045 +               crda->crd_klen = cse->mackeylen * 8;
15046 +       }
15047 +
15048 +       if (crde) {
15049 +               if (cop->op == COP_ENCRYPT)
15050 +                       crde->crd_flags |= CRD_F_ENCRYPT;
15051 +               else
15052 +                       crde->crd_flags &= ~CRD_F_ENCRYPT;
15053 +               crde->crd_len = cop->len;
15054 +               crde->crd_inject = 0;
15055 +
15056 +               crde->crd_alg = cse->cipher;
15057 +               crde->crd_key = cse->key;
15058 +               crde->crd_klen = cse->keylen * 8;
15059 +       }
15060 +
15061 +       crp->crp_ilen = cse->uio.uio_iov[0].iov_len;
15062 +       crp->crp_flags = CRYPTO_F_IOV | CRYPTO_F_CBIMM
15063 +                      | (cop->flags & COP_F_BATCH);
15064 +       crp->crp_buf = (caddr_t)&cse->uio;
15065 +       crp->crp_callback = (int (*) (struct cryptop *)) cryptodev_cb;
15066 +       crp->crp_sid = cse->sid;
15067 +       crp->crp_opaque = (void *)cse;
15068 +
15069 +       if (cop->iv) {
15070 +               if (crde == NULL) {
15071 +                       error = EINVAL;
15072 +                       dprintk("%s no crde\n", __FUNCTION__);
15073 +                       goto bail;
15074 +               }
15075 +               if (cse->cipher == CRYPTO_ARC4) { /* XXX use flag? */
15076 +                       error = EINVAL;
15077 +                       dprintk("%s arc4 with IV\n", __FUNCTION__);
15078 +                       goto bail;
15079 +               }
15080 +               if ((error = copy_from_user(cse->tmp_iv, cop->iv,
15081 +                                               cse->info.blocksize))) {
15082 +                       dprintk("%s bad iv copy\n", __FUNCTION__);
15083 +                       goto bail;
15084 +               }
15085 +               memcpy(crde->crd_iv, cse->tmp_iv, cse->info.blocksize);
15086 +               crde->crd_flags |= CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT;
15087 +               crde->crd_skip = 0;
15088 +       } else if (cse->cipher == CRYPTO_ARC4) { /* XXX use flag? */
15089 +               crde->crd_skip = 0;
15090 +       } else if (crde) {
15091 +               crde->crd_flags |= CRD_F_IV_PRESENT;
15092 +               crde->crd_skip = cse->info.blocksize;
15093 +               crde->crd_len -= cse->info.blocksize;
15094 +       }
15095 +
15096 +       if (cop->mac && crda == NULL) {
15097 +               error = EINVAL;
15098 +               dprintk("%s no crda\n", __FUNCTION__);
15099 +               goto bail;
15100 +       }
15101 +
15102 +       /*
15103 +        * Let the dispatch run unlocked, then interlock against the
15104 +        * callback before checking if the operation completed and going
15105 +        * to sleep.  This ensures drivers don't inherit our lock, which
15106 +        * results in a lock order reversal between crypto_dispatch forced
15107 +        * entry and the crypto_done callback into us.
15108 +        */
15109 +       error = crypto_dispatch(crp);
15110 +       if (error == 0) {
15111 +               dprintk("%s about to WAIT\n", __FUNCTION__);
15112 +               /*
15113 +                * we really need to wait for the driver to complete to maintain
15114 +                * state; luckily interrupts will be remembered
15115 +                */
15116 +               do {
15117 +                       error = wait_event_interruptible(crp->crp_waitq,
15118 +                                       ((crp->crp_flags & CRYPTO_F_DONE) != 0));
15119 +                       /*
15120 +                        * we can't break out of this loop or we will leave behind
15121 +                        * a huge mess; however, staying here means that if your driver
15122 +                        * is broken, user applications can hang and not be killed.
15123 +                        * The solution: fix your driver :-)
15124 +                        */
15125 +                       if (error) {
15126 +                               schedule();
15127 +                               error = 0;
15128 +                       }
15129 +               } while ((crp->crp_flags & CRYPTO_F_DONE) == 0);
15130 +               dprintk("%s finished WAITING error=%d\n", __FUNCTION__, error);
15131 +       }
15132 +
15133 +       if (crp->crp_etype != 0) {
15134 +               error = crp->crp_etype;
15135 +               dprintk("%s error in crp processing\n", __FUNCTION__);
15136 +               goto bail;
15137 +       }
15138 +
15139 +       if (cse->error) {
15140 +               error = cse->error;
15141 +               dprintk("%s error in cse processing\n", __FUNCTION__);
15142 +               goto bail;
15143 +       }
15144 +
15145 +       if (cop->dst && (error = copy_to_user(cop->dst,
15146 +                                       cse->uio.uio_iov[0].iov_base, cop->len))) {
15147 +               dprintk("%s bad dst copy\n", __FUNCTION__);
15148 +               goto bail;
15149 +       }
15150 +
15151 +       if (cop->mac &&
15152 +                       (error=copy_to_user(cop->mac,
15153 +                               (caddr_t)cse->uio.uio_iov[0].iov_base + cop->len,
15154 +                               cse->info.authsize))) {
15155 +               dprintk("%s bad mac copy\n", __FUNCTION__);
15156 +               goto bail;
15157 +       }
15158 +
15159 +bail:
15160 +       if (crp)
15161 +               crypto_freereq(crp);
15162 +       if (cse->uio.uio_iov[0].iov_base)
15163 +               kfree(cse->uio.uio_iov[0].iov_base);
15164 +
15165 +       return (error);
15166 +}
15167 +
15168 +static int
15169 +cryptodev_cb(void *op)
15170 +{
15171 +       struct cryptop *crp = (struct cryptop *) op;
15172 +       struct csession *cse = (struct csession *)crp->crp_opaque;
15173 +       int error;
15174 +
15175 +       dprintk("%s()\n", __FUNCTION__);
15176 +       error = crp->crp_etype;
15177 +       if (error == EAGAIN) {
15178 +               crp->crp_flags &= ~CRYPTO_F_DONE;
15179 +#ifdef NOTYET
15180 +               /*
15181 +                * DAVIDM I am fairly sure that we should turn this into a batch
15182 +                * request to stop bad karma/lockup, revisit
15183 +                */
15184 +               crp->crp_flags |= CRYPTO_F_BATCH;
15185 +#endif
15186 +               return crypto_dispatch(crp);
15187 +       }
15188 +       if (error != 0 || (crp->crp_flags & CRYPTO_F_DONE)) {
15189 +               cse->error = error;
15190 +               wake_up_interruptible(&crp->crp_waitq);
15191 +       }
15192 +       return (0);
15193 +}
15194 +
15195 +static int
15196 +cryptodevkey_cb(void *op)
15197 +{
15198 +       struct cryptkop *krp = (struct cryptkop *) op;
15199 +       dprintk("%s()\n", __FUNCTION__);
15200 +       wake_up_interruptible(&krp->krp_waitq);
15201 +       return (0);
15202 +}
15203 +
15204 +static int
15205 +cryptodev_key(struct crypt_kop *kop)
15206 +{
15207 +       struct cryptkop *krp = NULL;
15208 +       int error = EINVAL;
15209 +       int in, out, size, i;
15210 +
15211 +       dprintk("%s()\n", __FUNCTION__);
15212 +       if (kop->crk_iparams + kop->crk_oparams > CRK_MAXPARAM) {
15213 +               dprintk("%s params too big\n", __FUNCTION__);
15214 +               return (EFBIG);
15215 +       }
15216 +
15217 +       in = kop->crk_iparams;
15218 +       out = kop->crk_oparams;
15219 +       switch (kop->crk_op) {
15220 +       case CRK_MOD_EXP:
15221 +               if (in == 3 && out == 1)
15222 +                       break;
15223 +               return (EINVAL);
15224 +       case CRK_MOD_EXP_CRT:
15225 +               if (in == 6 && out == 1)
15226 +                       break;
15227 +               return (EINVAL);
15228 +       case CRK_DSA_SIGN:
15229 +               if (in == 5 && out == 2)
15230 +                       break;
15231 +               return (EINVAL);
15232 +       case CRK_DSA_VERIFY:
15233 +               if (in == 7 && out == 0)
15234 +                       break;
15235 +               return (EINVAL);
15236 +       case CRK_DH_COMPUTE_KEY:
15237 +               if (in == 3 && out == 1)
15238 +                       break;
15239 +               return (EINVAL);
15240 +       default:
15241 +               return (EINVAL);
15242 +       }
15243 +
15244 +       krp = (struct cryptkop *)kmalloc(sizeof *krp, GFP_KERNEL);
15245 +       if (!krp)
15246 +               return (ENOMEM);
15247 +       bzero(krp, sizeof *krp);
15248 +       krp->krp_op = kop->crk_op;
15249 +       krp->krp_status = kop->crk_status;
15250 +       krp->krp_iparams = kop->crk_iparams;
15251 +       krp->krp_oparams = kop->crk_oparams;
15252 +       krp->krp_crid = kop->crk_crid;
15253 +       krp->krp_status = 0;
15254 +       krp->krp_flags = CRYPTO_KF_CBIMM;
15255 +       krp->krp_callback = (int (*) (struct cryptkop *)) cryptodevkey_cb;
15256 +       init_waitqueue_head(&krp->krp_waitq);
15257 +
15258 +       for (i = 0; i < CRK_MAXPARAM; i++)
15259 +               krp->krp_param[i].crp_nbits = kop->crk_param[i].crp_nbits;
15260 +       for (i = 0; i < krp->krp_iparams + krp->krp_oparams; i++) {
15261 +               size = (krp->krp_param[i].crp_nbits + 7) / 8;
15262 +               if (size == 0)
15263 +                       continue;
15264 +               krp->krp_param[i].crp_p = (caddr_t) kmalloc(size, GFP_KERNEL);
15265 +               if (i >= krp->krp_iparams)
15266 +                       continue;
15267 +               error = copy_from_user(krp->krp_param[i].crp_p,
15268 +                               kop->crk_param[i].crp_p, size);
15269 +               if (error)
15270 +                       goto fail;
15271 +       }
15272 +
15273 +       error = crypto_kdispatch(krp);
15274 +       if (error)
15275 +               goto fail;
15276 +
15277 +       do {
15278 +               error = wait_event_interruptible(krp->krp_waitq,
15279 +                               ((krp->krp_flags & CRYPTO_KF_DONE) != 0));
15280 +               /*
15281 +                * we can't break out of this loop or we will leave behind
15282 +                * a huge mess; however, staying here means that if your driver
15283 +                * is broken, user applications can hang and not be killed.
15284 +                * The solution: fix your driver :-)
15285 +                */
15286 +               if (error) {
15287 +                       schedule();
15288 +                       error = 0;
15289 +               }
15290 +       } while ((krp->krp_flags & CRYPTO_KF_DONE) == 0);
15291 +
15292 +       dprintk("%s finished WAITING error=%d\n", __FUNCTION__, error);
15293 +       
15294 +       kop->crk_crid = krp->krp_crid;          /* device that did the work */
15295 +       if (krp->krp_status != 0) {
15296 +               error = krp->krp_status;
15297 +               goto fail;
15298 +       }
15299 +
15300 +       for (i = krp->krp_iparams; i < krp->krp_iparams + krp->krp_oparams; i++) {
15301 +               size = (krp->krp_param[i].crp_nbits + 7) / 8;
15302 +               if (size == 0)
15303 +                       continue;
15304 +               error = copy_to_user(kop->crk_param[i].crp_p, krp->krp_param[i].crp_p,
15305 +                               size);
15306 +               if (error)
15307 +                       goto fail;
15308 +       }
15309 +
15310 +fail:
15311 +       if (krp) {
15312 +               kop->crk_status = krp->krp_status;
15313 +               for (i = 0; i < CRK_MAXPARAM; i++) {
15314 +                       if (krp->krp_param[i].crp_p)
15315 +                               kfree(krp->krp_param[i].crp_p);
15316 +               }
15317 +               kfree(krp);
15318 +       }
15319 +       return (error);
15320 +}
15321 +
15322 +static int
15323 +cryptodev_find(struct crypt_find_op *find)
15324 +{
15325 +       device_t dev;
15326 +
15327 +       if (find->crid != -1) {
15328 +               dev = crypto_find_device_byhid(find->crid);
15329 +               if (dev == NULL)
15330 +                       return (ENOENT);
15331 +               strlcpy(find->name, device_get_nameunit(dev),
15332 +                   sizeof(find->name));
15333 +       } else {
15334 +               find->crid = crypto_find_driver(find->name);
15335 +               if (find->crid == -1)
15336 +                       return (ENOENT);
15337 +       }
15338 +       return (0);
15339 +}
15340 +
15341 +static struct csession *
15342 +csefind(struct fcrypt *fcr, u_int ses)
15343 +{
15344 +       struct csession *cse;
15345 +
15346 +       dprintk("%s()\n", __FUNCTION__);
15347 +       list_for_each_entry(cse, &fcr->csessions, list)
15348 +               if (cse->ses == ses)
15349 +                       return (cse);
15350 +       return (NULL);
15351 +}
15352 +
15353 +static int
15354 +csedelete(struct fcrypt *fcr, struct csession *cse_del)
15355 +{
15356 +       struct csession *cse;
15357 +
15358 +       dprintk("%s()\n", __FUNCTION__);
15359 +       list_for_each_entry(cse, &fcr->csessions, list) {
15360 +               if (cse == cse_del) {
15361 +                       list_del(&cse->list);
15362 +                       return (1);
15363 +               }
15364 +       }
15365 +       return (0);
15366 +}
15367 +       
15368 +static struct csession *
15369 +cseadd(struct fcrypt *fcr, struct csession *cse)
15370 +{
15371 +       dprintk("%s()\n", __FUNCTION__);
15372 +       list_add_tail(&cse->list, &fcr->csessions);
15373 +       cse->ses = fcr->sesn++;
15374 +       return (cse);
15375 +}
15376 +
15377 +static struct csession *
15378 +csecreate(struct fcrypt *fcr, u_int64_t sid, struct cryptoini *crie,
15379 +       struct cryptoini *cria, struct csession_info *info)
15380 +{
15381 +       struct csession *cse;
15382 +
15383 +       dprintk("%s()\n", __FUNCTION__);
15384 +       cse = (struct csession *) kmalloc(sizeof(struct csession), GFP_KERNEL);
15385 +       if (cse == NULL)
15386 +               return NULL;
15387 +       memset(cse, 0, sizeof(struct csession));
15388 +
15389 +       INIT_LIST_HEAD(&cse->list);
15390 +       init_waitqueue_head(&cse->waitq);
15391 +
15392 +       cse->key = crie->cri_key;
15393 +       cse->keylen = crie->cri_klen/8;
15394 +       cse->mackey = cria->cri_key;
15395 +       cse->mackeylen = cria->cri_klen/8;
15396 +       cse->sid = sid;
15397 +       cse->cipher = crie->cri_alg;
15398 +       cse->mac = cria->cri_alg;
15399 +       cse->info = *info;
15400 +       cseadd(fcr, cse);
15401 +       return (cse);
15402 +}
15403 +
15404 +static int
15405 +csefree(struct csession *cse)
15406 +{
15407 +       int error;
15408 +
15409 +       dprintk("%s()\n", __FUNCTION__);
15410 +       error = crypto_freesession(cse->sid);
15411 +       if (cse->key)
15412 +               kfree(cse->key);
15413 +       if (cse->mackey)
15414 +               kfree(cse->mackey);
15415 +       kfree(cse);
15416 +       return(error);
15417 +}
15418 +
15419 +static int
15420 +cryptodev_ioctl(
15421 +       struct inode *inode,
15422 +       struct file *filp,
15423 +       unsigned int cmd,
15424 +       unsigned long arg)
15425 +{
15426 +       struct cryptoini cria, crie;
15427 +       struct fcrypt *fcr = filp->private_data;
15428 +       struct csession *cse;
15429 +       struct csession_info info;
15430 +       struct session2_op sop;
15431 +       struct crypt_op cop;
15432 +       struct crypt_kop kop;
15433 +       struct crypt_find_op fop;
15434 +       u_int64_t sid;
15435 +       u_int32_t ses;
15436 +       int feat, fd, error = 0, crid;
15437 +       mm_segment_t fs;
15438 +
15439 +       dprintk("%s(cmd=%x arg=%lx)\n", __FUNCTION__, cmd, arg);
15440 +
15441 +       switch (cmd) {
15442 +
15443 +       case CRIOGET: {
15444 +               dprintk("%s(CRIOGET)\n", __FUNCTION__);
15445 +               fs = get_fs();
15446 +               set_fs(get_ds());
15447 +               for (fd = 0; fd < files_fdtable(current->files)->max_fds; fd++)
15448 +                       if (files_fdtable(current->files)->fd[fd] == filp)
15449 +                               break;
15450 +               fd = sys_dup(fd);
15451 +               set_fs(fs);
15452 +               put_user(fd, (int *) arg);
15453 +               return IS_ERR_VALUE(fd) ? fd : 0;
15454 +               }
15455 +
15456 +#define        CIOCGSESSSTR    (cmd == CIOCGSESSION ? "CIOCGSESSION" : "CIOCGSESSION2")
15457 +       case CIOCGSESSION:
15458 +       case CIOCGSESSION2:
15459 +               dprintk("%s(%s)\n", __FUNCTION__, CIOCGSESSSTR);
15460 +               memset(&crie, 0, sizeof(crie));
15461 +               memset(&cria, 0, sizeof(cria));
15462 +               memset(&info, 0, sizeof(info));
15463 +               memset(&sop, 0, sizeof(sop));
15464 +
15465 +               if (copy_from_user(&sop, (void*)arg, (cmd == CIOCGSESSION) ?
15466 +                                       sizeof(struct session_op) : sizeof(sop))) {
15467 +                       dprintk("%s(%s) - bad copy\n", __FUNCTION__, CIOCGSESSSTR);
15468 +                       error = EFAULT;
15469 +                       goto bail;
15470 +               }
15471 +
15472 +               switch (sop.cipher) {
15473 +               case 0:
15474 +                       dprintk("%s(%s) - no cipher\n", __FUNCTION__, CIOCGSESSSTR);
15475 +                       break;
15476 +               case CRYPTO_NULL_CBC:
15477 +                       info.blocksize = NULL_BLOCK_LEN;
15478 +                       info.minkey = NULL_MIN_KEY_LEN;
15479 +                       info.maxkey = NULL_MAX_KEY_LEN;
15480 +                       break;
15481 +               case CRYPTO_DES_CBC:
15482 +                       info.blocksize = DES_BLOCK_LEN;
15483 +                       info.minkey = DES_MIN_KEY_LEN;
15484 +                       info.maxkey = DES_MAX_KEY_LEN;
15485 +                       break;
15486 +               case CRYPTO_3DES_CBC:
15487 +                       info.blocksize = DES3_BLOCK_LEN;
15488 +                       info.minkey = DES3_MIN_KEY_LEN;
15489 +                       info.maxkey = DES3_MAX_KEY_LEN;
15490 +                       break;
15491 +               case CRYPTO_BLF_CBC:
15492 +                       info.blocksize = BLOWFISH_BLOCK_LEN;
15493 +                       info.minkey = BLOWFISH_MIN_KEY_LEN;
15494 +                       info.maxkey = BLOWFISH_MAX_KEY_LEN;
15495 +                       break;
15496 +               case CRYPTO_CAST_CBC:
15497 +                       info.blocksize = CAST128_BLOCK_LEN;
15498 +                       info.minkey = CAST128_MIN_KEY_LEN;
15499 +                       info.maxkey = CAST128_MAX_KEY_LEN;
15500 +                       break;
15501 +               case CRYPTO_SKIPJACK_CBC:
15502 +                       info.blocksize = SKIPJACK_BLOCK_LEN;
15503 +                       info.minkey = SKIPJACK_MIN_KEY_LEN;
15504 +                       info.maxkey = SKIPJACK_MAX_KEY_LEN;
15505 +                       break;
15506 +               case CRYPTO_AES_CBC:
15507 +                       info.blocksize = AES_BLOCK_LEN;
15508 +                       info.minkey = AES_MIN_KEY_LEN;
15509 +                       info.maxkey = AES_MAX_KEY_LEN;
15510 +                       break;
15511 +               case CRYPTO_ARC4:
15512 +                       info.blocksize = ARC4_BLOCK_LEN;
15513 +                       info.minkey = ARC4_MIN_KEY_LEN;
15514 +                       info.maxkey = ARC4_MAX_KEY_LEN;
15515 +                       break;
15516 +               case CRYPTO_CAMELLIA_CBC:
15517 +                       info.blocksize = CAMELLIA_BLOCK_LEN;
15518 +                       info.minkey = CAMELLIA_MIN_KEY_LEN;
15519 +                       info.maxkey = CAMELLIA_MAX_KEY_LEN;
15520 +                       break;
15521 +               default:
15522 +                       dprintk("%s(%s) - bad cipher\n", __FUNCTION__, CIOCGSESSSTR);
15523 +                       error = EINVAL;
15524 +                       goto bail;
15525 +               }
15526 +
15527 +               switch (sop.mac) {
15528 +               case 0:
15529 +                       dprintk("%s(%s) - no mac\n", __FUNCTION__, CIOCGSESSSTR);
15530 +                       break;
15531 +               case CRYPTO_NULL_HMAC:
15532 +                       info.authsize = NULL_HASH_LEN;
15533 +                       break;
15534 +               case CRYPTO_MD5:
15535 +                       info.authsize = MD5_HASH_LEN;
15536 +                       break;
15537 +               case CRYPTO_SHA1:
15538 +                       info.authsize = SHA1_HASH_LEN;
15539 +                       break;
15540 +               case CRYPTO_SHA2_256:
15541 +                       info.authsize = SHA2_256_HASH_LEN;
15542 +                       break;
15543 +               case CRYPTO_SHA2_384:
15544 +                       info.authsize = SHA2_384_HASH_LEN;
15545 +                       break;
15546 +               case CRYPTO_SHA2_512:
15547 +                       info.authsize = SHA2_512_HASH_LEN;
15548 +                       break;
15549 +               case CRYPTO_RIPEMD160:
15550 +                       info.authsize = RIPEMD160_HASH_LEN;
15551 +                       break;
15552 +               case CRYPTO_MD5_HMAC:
15553 +                       info.authsize = MD5_HASH_LEN;
15554 +                       break;
15555 +               case CRYPTO_SHA1_HMAC:
15556 +                       info.authsize = SHA1_HASH_LEN;
15557 +                       break;
15558 +               case CRYPTO_SHA2_256_HMAC:
15559 +                       info.authsize = SHA2_256_HASH_LEN;
15560 +                       break;
15561 +               case CRYPTO_SHA2_384_HMAC:
15562 +                       info.authsize = SHA2_384_HASH_LEN;
15563 +                       break;
15564 +               case CRYPTO_SHA2_512_HMAC:
15565 +                       info.authsize = SHA2_512_HASH_LEN;
15566 +                       break;
15567 +               case CRYPTO_RIPEMD160_HMAC:
15568 +                       info.authsize = RIPEMD160_HASH_LEN;
15569 +                       break;
15570 +               default:
15571 +                       dprintk("%s(%s) - bad mac\n", __FUNCTION__, CIOCGSESSSTR);
15572 +                       error = EINVAL;
15573 +                       goto bail;
15574 +               }
15575 +
15576 +               if (info.blocksize) {
15577 +                       crie.cri_alg = sop.cipher;
15578 +                       crie.cri_klen = sop.keylen * 8;
15579 +                       if ((info.maxkey && sop.keylen > info.maxkey) ||
15580 +                                       sop.keylen < info.minkey) {
15581 +                               dprintk("%s(%s) - bad key\n", __FUNCTION__, CIOCGSESSSTR);
15582 +                               error = EINVAL;
15583 +                               goto bail;
15584 +                       }
15585 +
15586 +                       crie.cri_key = (u_int8_t *) kmalloc(crie.cri_klen/8+1, GFP_KERNEL);
15587 +                       if (copy_from_user(crie.cri_key, sop.key,
15588 +                                                       crie.cri_klen/8)) {
15589 +                               dprintk("%s(%s) - bad copy\n", __FUNCTION__, CIOCGSESSSTR);
15590 +                               error = EFAULT;
15591 +                               goto bail;
15592 +                       }
15593 +                       if (info.authsize)
15594 +                               crie.cri_next = &cria;
15595 +               }
15596 +
15597 +               if (info.authsize) {
15598 +                       cria.cri_alg = sop.mac;
15599 +                       cria.cri_klen = sop.mackeylen * 8;
15600 +                       if ((info.maxkey && sop.mackeylen > info.maxkey) ||
15601 +                                       sop.keylen < info.minkey) {
15602 +                               dprintk("%s(%s) - mackeylen %d\n", __FUNCTION__, CIOCGSESSSTR,
15603 +                                               sop.mackeylen);
15604 +                               error = EINVAL;
15605 +                               goto bail;
15606 +                       }
15607 +
15608 +                       if (cria.cri_klen) {
15609 +                               cria.cri_key = (u_int8_t *) kmalloc(cria.cri_klen/8,GFP_KERNEL);
15610 +                               if (copy_from_user(cria.cri_key, sop.mackey,
15611 +                                                               cria.cri_klen / 8)) {
15612 +                                       dprintk("%s(%s) - bad copy\n", __FUNCTION__, CIOCGSESSSTR);
15613 +                                       error = EFAULT;
15614 +                                       goto bail;
15615 +                               }
15616 +                       }
15617 +               }
15618 +
15619 +               /* NB: CIOCGSESSION2 has the crid */
15620 +               if (cmd == CIOCGSESSION2) {
15621 +                       crid = sop.crid;
15622 +                       error = checkcrid(crid);
15623 +                       if (error) {
15624 +                               dprintk("%s(%s) - checkcrid %x\n", __FUNCTION__,
15625 +                                               CIOCGSESSSTR, error);
15626 +                               goto bail;
15627 +                       }
15628 +               } else {
15629 +                       /* allow either HW or SW to be used */
15630 +                       crid = CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE;
15631 +               }
15632 +               error = crypto_newsession(&sid, (info.blocksize ? &crie : &cria), crid);
15633 +               if (error) {
15634 +                       dprintk("%s(%s) - newsession %d\n",__FUNCTION__,CIOCGSESSSTR,error);
15635 +                       goto bail;
15636 +               }
15637 +
15638 +               cse = csecreate(fcr, sid, &crie, &cria, &info);
15639 +               if (cse == NULL) {
15640 +                       crypto_freesession(sid);
15641 +                       error = EINVAL;
15642 +                       dprintk("%s(%s) - csecreate failed\n", __FUNCTION__, CIOCGSESSSTR);
15643 +                       goto bail;
15644 +               }
15645 +               sop.ses = cse->ses;
15646 +
15647 +               if (cmd == CIOCGSESSION2) {
15648 +                       /* return hardware/driver id */
15649 +                       sop.crid = CRYPTO_SESID2HID(cse->sid);
15650 +               }
15651 +
15652 +               if (copy_to_user((void*)arg, &sop, (cmd == CIOCGSESSION) ?
15653 +                                       sizeof(struct session_op) : sizeof(sop))) {
15654 +                       dprintk("%s(%s) - bad copy\n", __FUNCTION__, CIOCGSESSSTR);
15655 +                       error = EFAULT;
15656 +               }
15657 +bail:
15658 +               if (error) {
15659 +                       dprintk("%s(%s) - bail %d\n", __FUNCTION__, CIOCGSESSSTR, error);
15660 +                       if (crie.cri_key)
15661 +                               kfree(crie.cri_key);
15662 +                       if (cria.cri_key)
15663 +                               kfree(cria.cri_key);
15664 +               }
15665 +               break;
15666 +       case CIOCFSESSION:
15667 +               dprintk("%s(CIOCFSESSION)\n", __FUNCTION__);
15668 +               get_user(ses, (uint32_t*)arg);
15669 +               cse = csefind(fcr, ses);
15670 +               if (cse == NULL) {
15671 +                       error = EINVAL;
15672 +                       dprintk("%s(CIOCFSESSION) - Fail %d\n", __FUNCTION__, error);
15673 +                       break;
15674 +               }
15675 +               csedelete(fcr, cse);
15676 +               error = csefree(cse);
15677 +               break;
15678 +       case CIOCCRYPT:
15679 +               dprintk("%s(CIOCCRYPT)\n", __FUNCTION__);
15680 +               if(copy_from_user(&cop, (void*)arg, sizeof(cop))) {
15681 +                       dprintk("%s(CIOCCRYPT) - bad copy\n", __FUNCTION__);
15682 +                       error = EFAULT;
15683 +                       goto bail;
15684 +               }
15685 +               cse = csefind(fcr, cop.ses);
15686 +               if (cse == NULL) {
15687 +                       error = EINVAL;
15688 +                       dprintk("%s(CIOCCRYPT) - Fail %d\n", __FUNCTION__, error);
15689 +                       break;
15690 +               }
15691 +               error = cryptodev_op(cse, &cop);
15692 +               if(copy_to_user((void*)arg, &cop, sizeof(cop))) {
15693 +                       dprintk("%s(CIOCCRYPT) - bad return copy\n", __FUNCTION__);
15694 +                       error = EFAULT;
15695 +                       goto bail;
15696 +               }
15697 +               break;
15698 +       case CIOCKEY:
15699 +       case CIOCKEY2:
15700 +               dprintk("%s(CIOCKEY)\n", __FUNCTION__);
15701 +               if (!crypto_userasymcrypto)
15702 +                       return (EPERM);         /* XXX compat? */
15703 +               if(copy_from_user(&kop, (void*)arg, sizeof(kop))) {
15704 +                       dprintk("%s(CIOCKEY) - bad copy\n", __FUNCTION__);
15705 +                       error = EFAULT;
15706 +                       goto bail;
15707 +               }
15708 +               if (cmd == CIOCKEY) {
15709 +                       /* NB: crypto core enforces s/w driver use */
15710 +                       kop.crk_crid =
15711 +                           CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE;
15712 +               }
15713 +               error = cryptodev_key(&kop);
15714 +               if(copy_to_user((void*)arg, &kop, sizeof(kop))) {
15715 +                       dprintk("%s(CIOCGKEY) - bad return copy\n", __FUNCTION__);
15716 +                       error = EFAULT;
15717 +                       goto bail;
15718 +               }
15719 +               break;
15720 +       case CIOCASYMFEAT:
15721 +               dprintk("%s(CIOCASYMFEAT)\n", __FUNCTION__);
15722 +               if (!crypto_userasymcrypto) {
15723 +                       /*
15724 +                        * NB: if user asym crypto operations are
15725 +                        * not permitted return "no algorithms"
15726 +                        * so well-behaved applications will just
15727 +                        * fall back to doing them in software.
15728 +                        */
15729 +                       feat = 0;
15730 +               } else
15731 +                       error = crypto_getfeat(&feat);
15732 +               if (!error) {
15733 +                 error = copy_to_user((void*)arg, &feat, sizeof(feat));
15734 +               }
15735 +               break;
15736 +       case CIOCFINDDEV:
15737 +               if (copy_from_user(&fop, (void*)arg, sizeof(fop))) {
15738 +                       dprintk("%s(CIOCFINDDEV) - bad copy\n", __FUNCTION__);
15739 +                       error = EFAULT;
15740 +                       goto bail;
15741 +               }
15742 +               error = cryptodev_find(&fop);
15743 +               if (copy_to_user((void*)arg, &fop, sizeof(fop))) {
15744 +                       dprintk("%s(CIOCFINDDEV) - bad return copy\n", __FUNCTION__);
15745 +                       error = EFAULT;
15746 +                       goto bail;
15747 +               }
15748 +               break;
15749 +       default:
15750 +               dprintk("%s(unknown ioctl 0x%x)\n", __FUNCTION__, cmd);
15751 +               error = EINVAL;
15752 +               break;
15753 +       }
15754 +       return(-error);
15755 +}
15756 +
15757 +#ifdef HAVE_UNLOCKED_IOCTL
15758 +static long
15759 +cryptodev_unlocked_ioctl(
15760 +       struct file *filp,
15761 +       unsigned int cmd,
15762 +       unsigned long arg)
15763 +{
15764 +       return cryptodev_ioctl(NULL, filp, cmd, arg);
15765 +}
15766 +#endif
15767 +
15768 +static int
15769 +cryptodev_open(struct inode *inode, struct file *filp)
15770 +{
15771 +       struct fcrypt *fcr;
15772 +
15773 +       dprintk("%s()\n", __FUNCTION__);
15774 +       if (filp->private_data) {
15775 +               printk("cryptodev: Private data already exists !\n");
15776 +               return(0);
15777 +       }
15778 +
15779 +       fcr = kmalloc(sizeof(*fcr), GFP_KERNEL);
15780 +       if (!fcr) {
15781 +               dprintk("%s() - malloc failed\n", __FUNCTION__);
15782 +               return(-ENOMEM);
15783 +       }
15784 +       memset(fcr, 0, sizeof(*fcr));
15785 +
15786 +       INIT_LIST_HEAD(&fcr->csessions);
15787 +       filp->private_data = fcr;
15788 +       return(0);
15789 +}
15790 +
15791 +static int
15792 +cryptodev_release(struct inode *inode, struct file *filp)
15793 +{
15794 +       struct fcrypt *fcr = filp->private_data;
15795 +       struct csession *cse, *tmp;
15796 +
15797 +       dprintk("%s()\n", __FUNCTION__);
15798 +       if (!fcr) {
15799 +               printk("cryptodev: No private data on release\n");
15800 +               return(0);
15801 +       }
15802 +
15803 +       list_for_each_entry_safe(cse, tmp, &fcr->csessions, list) {
15804 +               list_del(&cse->list);
15805 +               (void)csefree(cse);
15806 +       }
15807 +       filp->private_data = NULL;
15808 +       kfree(fcr);
15809 +       return(0);
15810 +}
15811 +
15812 +static struct file_operations cryptodev_fops = {
15813 +       .owner = THIS_MODULE,
15814 +       .open = cryptodev_open,
15815 +       .release = cryptodev_release,
15816 +       .ioctl = cryptodev_ioctl,
15817 +#ifdef HAVE_UNLOCKED_IOCTL
15818 +       .unlocked_ioctl = cryptodev_unlocked_ioctl,
15819 +#endif
15820 +};
15821 +
15822 +static struct miscdevice cryptodev = {
15823 +       .minor = CRYPTODEV_MINOR,
15824 +       .name = "crypto",
15825 +       .fops = &cryptodev_fops,
15826 +};
15827 +
15828 +static int __init
15829 +cryptodev_init(void)
15830 +{
15831 +       int rc;
15832 +
15833 +       dprintk("%s(%p)\n", __FUNCTION__, cryptodev_init);
15834 +       rc = misc_register(&cryptodev);
15835 +       if (rc) {
15836 +               printk(KERN_ERR "cryptodev: registration of /dev/crypto failed\n");
15837 +               return(rc);
15838 +       }
15839 +
15840 +       return(0);
15841 +}
15842 +
15843 +static void __exit
15844 +cryptodev_exit(void)
15845 +{
15846 +       dprintk("%s()\n", __FUNCTION__);
15847 +       misc_deregister(&cryptodev);
15848 +}
15849 +
15850 +module_init(cryptodev_init);
15851 +module_exit(cryptodev_exit);
15852 +
15853 +MODULE_LICENSE("BSD");
15854 +MODULE_AUTHOR("David McCullough <david_mccullough@securecomputing.com>");
15855 +MODULE_DESCRIPTION("Cryptodev (user interface to OCF)");
15856 --- /dev/null   2007-07-04 13:54:27.000000000 +1000
15857 +++ linux/crypto/ocf/cryptodev.h        2007-09-26 22:15:05.000000000 +1000
15858 @@ -0,0 +1,478 @@
15859 +/*     $FreeBSD: src/sys/opencrypto/cryptodev.h,v 1.25 2007/05/09 19:37:02 gnn Exp $   */
15860 +/*     $OpenBSD: cryptodev.h,v 1.31 2002/06/11 11:14:29 beck Exp $     */
15861 +
15862 +/*-
15863 + * Linux port done by David McCullough <david_mccullough@securecomputing.com>
15864 + * Copyright (C) 2006-2007 David McCullough
15865 + * Copyright (C) 2004-2005 Intel Corporation.
15866 + * The license and original author are listed below.
15867 + *
15868 + * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
15869 + * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
15870 + *
15871 + * This code was written by Angelos D. Keromytis in Athens, Greece, in
15872 + * February 2000. Network Security Technologies Inc. (NSTI) kindly
15873 + * supported the development of this code.
15874 + *
15875 + * Copyright (c) 2000 Angelos D. Keromytis
15876 + *
15877 + * Permission to use, copy, and modify this software with or without fee
15878 + * is hereby granted, provided that this entire notice is included in
15879 + * all source code copies of any software which is or includes a copy or
15880 + * modification of this software.
15881 + *
15882 + * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
15883 + * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
15884 + * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
15885 + * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
15886 + * PURPOSE.
15887 + *
15888 + * Copyright (c) 2001 Theo de Raadt
15889 + *
15890 + * Redistribution and use in source and binary forms, with or without
15891 + * modification, are permitted provided that the following conditions
15892 + * are met:
15893 + *
15894 + * 1. Redistributions of source code must retain the above copyright
15895 + *   notice, this list of conditions and the following disclaimer.
15896 + * 2. Redistributions in binary form must reproduce the above copyright
15897 + *   notice, this list of conditions and the following disclaimer in the
15898 + *   documentation and/or other materials provided with the distribution.
15899 + * 3. The name of the author may not be used to endorse or promote products
15900 + *   derived from this software without specific prior written permission.
15901 + *
15902 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
15903 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
15904 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
15905 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
15906 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
15907 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
15908 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
15909 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
15910 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
15911 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
15912 + *
15913 + * Effort sponsored in part by the Defense Advanced Research Projects
15914 + * Agency (DARPA) and Air Force Research Laboratory, Air Force
15915 + * Materiel Command, USAF, under agreement number F30602-01-2-0537.
15916 + *
15917 + */
15918 +
15919 +#ifndef _CRYPTO_CRYPTO_H_
15920 +#define _CRYPTO_CRYPTO_H_
15921 +
15922 +/* Some initial values */
15923 +#define CRYPTO_DRIVERS_INITIAL 4
15924 +#define CRYPTO_SW_SESSIONS     32
15925 +
15926 +/* Hash values */
15927 +#define NULL_HASH_LEN          0
15928 +#define MD5_HASH_LEN           16
15929 +#define SHA1_HASH_LEN          20
15930 +#define RIPEMD160_HASH_LEN     20
15931 +#define SHA2_256_HASH_LEN      32
15932 +#define SHA2_384_HASH_LEN      48
15933 +#define SHA2_512_HASH_LEN      64
15934 +#define MD5_KPDK_HASH_LEN      16
15935 +#define SHA1_KPDK_HASH_LEN     20
15936 +/* Maximum hash algorithm result length */
15937 +#define HASH_MAX_LEN           SHA2_512_HASH_LEN /* Keep this updated */
15938 +
15939 +/* HMAC values */
15940 +#define NULL_HMAC_BLOCK_LEN                    1
15941 +#define MD5_HMAC_BLOCK_LEN                     64
15942 +#define SHA1_HMAC_BLOCK_LEN                    64
15943 +#define RIPEMD160_HMAC_BLOCK_LEN       64
15944 +#define SHA2_256_HMAC_BLOCK_LEN                64
15945 +#define SHA2_384_HMAC_BLOCK_LEN                128
15946 +#define SHA2_512_HMAC_BLOCK_LEN                128
15947 +/* Maximum HMAC block length */
15948 +#define HMAC_MAX_BLOCK_LEN             SHA2_512_HMAC_BLOCK_LEN /* Keep this updated */
15949 +#define HMAC_IPAD_VAL                  0x36
15950 +#define HMAC_OPAD_VAL                  0x5C
15951 +
15952 +/* Encryption algorithm block sizes */
15953 +#define NULL_BLOCK_LEN                 1
15954 +#define DES_BLOCK_LEN                  8
15955 +#define DES3_BLOCK_LEN                 8
15956 +#define BLOWFISH_BLOCK_LEN             8
15957 +#define SKIPJACK_BLOCK_LEN             8
15958 +#define CAST128_BLOCK_LEN              8
15959 +#define RIJNDAEL128_BLOCK_LEN  16
15960 +#define AES_BLOCK_LEN                  RIJNDAEL128_BLOCK_LEN
15961 +#define CAMELLIA_BLOCK_LEN             16
15962 +#define ARC4_BLOCK_LEN                 1
15963 +#define EALG_MAX_BLOCK_LEN             AES_BLOCK_LEN /* Keep this updated */
15964 +
15965 +/* Encryption algorithm min and max key sizes */
15966 +#define NULL_MIN_KEY_LEN               0
15967 +#define NULL_MAX_KEY_LEN               0
15968 +#define DES_MIN_KEY_LEN                        8
15969 +#define DES_MAX_KEY_LEN                        8
15970 +#define DES3_MIN_KEY_LEN               24
15971 +#define DES3_MAX_KEY_LEN               24
15972 +#define BLOWFISH_MIN_KEY_LEN   4
15973 +#define BLOWFISH_MAX_KEY_LEN   56
15974 +#define SKIPJACK_MIN_KEY_LEN   10
15975 +#define SKIPJACK_MAX_KEY_LEN   10
15976 +#define CAST128_MIN_KEY_LEN            5
15977 +#define CAST128_MAX_KEY_LEN            16
15978 +#define RIJNDAEL128_MIN_KEY_LEN        16
15979 +#define RIJNDAEL128_MAX_KEY_LEN        32
15980 +#define AES_MIN_KEY_LEN                        RIJNDAEL128_MIN_KEY_LEN
15981 +#define AES_MAX_KEY_LEN                        RIJNDAEL128_MAX_KEY_LEN
15982 +#define CAMELLIA_MIN_KEY_LEN   16
15983 +#define CAMELLIA_MAX_KEY_LEN   32
15984 +#define ARC4_MIN_KEY_LEN               1
15985 +#define ARC4_MAX_KEY_LEN               256
15986 +
15987 +/* Max size of data that can be processed */
15988 +#define CRYPTO_MAX_DATA_LEN            64*1024 - 1
15989 +
15990 +#define CRYPTO_ALGORITHM_MIN   1
15991 +#define CRYPTO_DES_CBC                 1
15992 +#define CRYPTO_3DES_CBC                        2
15993 +#define CRYPTO_BLF_CBC                 3
15994 +#define CRYPTO_CAST_CBC                        4
15995 +#define CRYPTO_SKIPJACK_CBC            5
15996 +#define CRYPTO_MD5_HMAC                        6
15997 +#define CRYPTO_SHA1_HMAC               7
15998 +#define CRYPTO_RIPEMD160_HMAC  8
15999 +#define CRYPTO_MD5_KPDK                        9
16000 +#define CRYPTO_SHA1_KPDK               10
16001 +#define CRYPTO_RIJNDAEL128_CBC 11 /* 128 bit blocksize */
16002 +#define CRYPTO_AES_CBC                 11 /* 128 bit blocksize -- the same as above */
16003 +#define CRYPTO_ARC4                            12
16004 +#define CRYPTO_MD5                             13
16005 +#define CRYPTO_SHA1                            14
16006 +#define CRYPTO_NULL_HMAC               15
16007 +#define CRYPTO_NULL_CBC                        16
16008 +#define CRYPTO_DEFLATE_COMP            17 /* Deflate compression algorithm */
16009 +#define CRYPTO_SHA2_256_HMAC   18
16010 +#define CRYPTO_SHA2_384_HMAC   19
16011 +#define CRYPTO_SHA2_512_HMAC   20
16012 +#define CRYPTO_CAMELLIA_CBC            21
16013 +#define CRYPTO_SHA2_256                        22
16014 +#define CRYPTO_SHA2_384                        23
16015 +#define CRYPTO_SHA2_512                        24
16016 +#define CRYPTO_RIPEMD160               25
16017 +#define CRYPTO_ALGORITHM_MAX   25 /* Keep updated - see below */
16018 +
16019 +/* Algorithm flags */
16020 +#define CRYPTO_ALG_FLAG_SUPPORTED      0x01 /* Algorithm is supported */
16021 +#define CRYPTO_ALG_FLAG_RNG_ENABLE     0x02 /* Has HW RNG for DH/DSA */
16022 +#define CRYPTO_ALG_FLAG_DSA_SHA                0x04 /* Can do SHA on msg */
16023 +
16024 +/*
16025 + * Crypto driver/device flags.  They can set in the crid
16026 + * parameter when creating a session or submitting a key
16027 + * op to affect the device/driver assigned.  If neither
16028 + * of these are specified then the crid is assumed to hold
16029 + * the driver id of an existing (and suitable) device that
16030 + * must be used to satisfy the request.
16031 + */
16032 +#define CRYPTO_FLAG_HARDWARE   0x01000000      /* hardware accelerated */
16033 +#define CRYPTO_FLAG_SOFTWARE   0x02000000      /* software implementation */
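/*
 * Sketch (illustrative only): filling in the crid of a session2_op.
 * "cfd" and the driver name below are placeholders, not part of this API.
 */
struct session2_op s2;
struct crypt_find_op fop;

memset(&s2, 0, sizeof(s2));
s2.crid = CRYPTO_FLAG_HARDWARE | CRYPTO_FLAG_SOFTWARE;	/* any suitable driver */

/* or pin the session to one driver located by name: */
memset(&fop, 0, sizeof(fop));
strncpy(fop.name, "cryptosoft", sizeof(fop.name));	/* placeholder driver name */
if (ioctl(cfd, CIOCFINDDEV, &fop) == 0)
	s2.crid = fop.crid;				/* exact driver id, no flags */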
16034 +
16035 +/* NB: deprecated */
16036 +struct session_op {
16037 +       u_int32_t       cipher;         /* ie. CRYPTO_DES_CBC */
16038 +       u_int32_t       mac;            /* ie. CRYPTO_MD5_HMAC */
16039 +
16040 +       u_int32_t       keylen;         /* cipher key */
16041 +       caddr_t         key;
16042 +       int             mackeylen;      /* mac key */
16043 +       caddr_t         mackey;
16044 +
16045 +       u_int32_t       ses;            /* returns: session # */ 
16046 +};
16047 +
16048 +struct session2_op {
16049 +       u_int32_t       cipher;         /* ie. CRYPTO_DES_CBC */
16050 +       u_int32_t       mac;            /* ie. CRYPTO_MD5_HMAC */
16051 +
16052 +       u_int32_t       keylen;         /* cipher key */
16053 +       caddr_t         key;
16054 +       int             mackeylen;      /* mac key */
16055 +       caddr_t         mackey;
16056 +
16057 +       u_int32_t       ses;            /* returns: session # */ 
16058 +       int             crid;           /* driver id + flags (rw) */
16059 +       int             pad[4];         /* for future expansion */
16060 +};
16061 +
16062 +struct crypt_op {
16063 +       u_int32_t       ses;
16064 +       u_int16_t       op;             /* i.e. COP_ENCRYPT */
16065 +#define COP_NONE       0
16066 +#define COP_ENCRYPT    1
16067 +#define COP_DECRYPT    2
16068 +       u_int16_t       flags;
16069 +#define        COP_F_BATCH     0x0008          /* Batch op if possible */
16070 +       u_int           len;
16071 +       caddr_t         src, dst;       /* become iov[] inside kernel */
16072 +       caddr_t         mac;            /* must be big enough for chosen MAC */
16073 +       caddr_t         iv;
16074 +};
16075 +
16076 +/*
16077 + * Parameters for looking up a crypto driver/device by
16078 + * device name or by id.  The latter are returned for
16079 + * created sessions (crid) and completed key operations.
16080 + */
16081 +struct crypt_find_op {
16082 +       int             crid;           /* driver id + flags */
16083 +       char            name[32];       /* device/driver name */
16084 +};
16085 +
16086 +/* bignum parameter, in packed bytes, ... */
16087 +struct crparam {
16088 +       caddr_t         crp_p;
16089 +       u_int           crp_nbits;
16090 +};
16091 +
16092 +#define CRK_MAXPARAM   8
16093 +
16094 +struct crypt_kop {
16095 +       u_int           crk_op;         /* ie. CRK_MOD_EXP or other */
16096 +       u_int           crk_status;     /* return status */
16097 +       u_short         crk_iparams;    /* # of input parameters */
16098 +       u_short         crk_oparams;    /* # of output parameters */
16099 +       u_int           crk_crid;       /* NB: only used by CIOCKEY2 (rw) */
16100 +       struct crparam  crk_param[CRK_MAXPARAM];
16101 +};
16102 +#define CRK_ALGORITM_MIN       0
16103 +#define CRK_MOD_EXP            0
16104 +#define CRK_MOD_EXP_CRT                1
16105 +#define CRK_DSA_SIGN           2
16106 +#define CRK_DSA_VERIFY         3
16107 +#define CRK_DH_COMPUTE_KEY     4
16108 +#define CRK_ALGORITHM_MAX      4 /* Keep updated - see below */
16109 +
16110 +#define CRF_MOD_EXP            (1 << CRK_MOD_EXP)
16111 +#define CRF_MOD_EXP_CRT                (1 << CRK_MOD_EXP_CRT)
16112 +#define CRF_DSA_SIGN           (1 << CRK_DSA_SIGN)
16113 +#define CRF_DSA_VERIFY         (1 << CRK_DSA_VERIFY)
16114 +#define CRF_DH_COMPUTE_KEY     (1 << CRK_DH_COMPUTE_KEY)
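/*
 * Sketch (illustrative only): the shape of a CRK_MOD_EXP request submitted
 * through CIOCKEY.  A modular exponentiation takes three input parameters
 * (base, exponent, modulus) and one output (result); the buffers and bit
 * sizes below are placeholders and bignum byte packing is not shown.
 */
unsigned char base[128], exp[128], mod[128], result[128];
struct crypt_kop kop;

memset(&kop, 0, sizeof(kop));
kop.crk_op      = CRK_MOD_EXP;
kop.crk_iparams = 3;
kop.crk_oparams = 1;
kop.crk_param[0].crp_p = (caddr_t)base;   kop.crk_param[0].crp_nbits = 1024;
kop.crk_param[1].crp_p = (caddr_t)exp;    kop.crk_param[1].crp_nbits = 1024;
kop.crk_param[2].crp_p = (caddr_t)mod;    kop.crk_param[2].crp_nbits = 1024;
kop.crk_param[3].crp_p = (caddr_t)result; kop.crk_param[3].crp_nbits = 1024;
/* ioctl(cfd, CIOCKEY, &kop);  kop.crk_status then reports the driver's result */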
16115 +
16116 +/*
16117 + * CRIOGET is done against an open /dev/crypto descriptor to get a cloned
16118 + * descriptor.  Please use F_SETFD against the cloned descriptor.
16119 + */
16120 +#define CRIOGET                _IOWR('c', 100, u_int32_t)
16121 +#define CRIOASYMFEAT   CIOCASYMFEAT
16122 +#define CRIOFINDDEV    CIOCFINDDEV
16123 +
16124 +/* the following are done against the cloned descriptor */
16125 +#define CIOCGSESSION   _IOWR('c', 101, struct session_op)
16126 +#define CIOCFSESSION   _IOW('c', 102, u_int32_t)
16127 +#define CIOCCRYPT      _IOWR('c', 103, struct crypt_op)
16128 +#define CIOCKEY                _IOWR('c', 104, struct crypt_kop)
16129 +#define CIOCASYMFEAT   _IOR('c', 105, u_int32_t)
16130 +#define CIOCGSESSION2  _IOWR('c', 106, struct session2_op)
16131 +#define CIOCKEY2       _IOWR('c', 107, struct crypt_kop)
16132 +#define CIOCFINDDEV    _IOWR('c', 108, struct crypt_find_op)
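/*
 * Sketch (illustrative only) of driving the ioctls above from user space:
 * AES-128-CBC encryption of one buffer.  Error checking is omitted and the
 * function and argument names are placeholders, not part of this API.
 * Assumes <fcntl.h>, <string.h>, <sys/ioctl.h>, <unistd.h> plus this header.
 */
static int aes_cbc_encrypt_once(void *key16, void *iv16,
                                void *src, void *dst, unsigned int len)
{
        int fd = open("/dev/crypto", O_RDWR), cfd;
        struct session_op sop;
        struct crypt_op cop;

        ioctl(fd, CRIOGET, &cfd);              /* get the cloned descriptor */
        fcntl(cfd, F_SETFD, FD_CLOEXEC);       /* as recommended above */

        memset(&sop, 0, sizeof(sop));
        sop.cipher = CRYPTO_AES_CBC;
        sop.keylen = 16;                       /* AES-128 */
        sop.key    = (caddr_t)key16;
        ioctl(cfd, CIOCGSESSION, &sop);        /* sop.ses now holds the session # */

        memset(&cop, 0, sizeof(cop));
        cop.ses = sop.ses;
        cop.op  = COP_ENCRYPT;
        cop.len = len;                         /* multiple of AES_BLOCK_LEN */
        cop.src = (caddr_t)src;
        cop.dst = (caddr_t)dst;
        cop.iv  = (caddr_t)iv16;               /* AES_BLOCK_LEN bytes */
        ioctl(cfd, CIOCCRYPT, &cop);

        ioctl(cfd, CIOCFSESSION, &sop.ses);    /* tear down the session */
        close(cfd);
        close(fd);
        return 0;
}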
16133 +
16134 +struct cryptotstat {
16135 +       struct timespec acc;            /* total accumulated time */
16136 +       struct timespec min;            /* min time */
16137 +       struct timespec max;            /* max time */
16138 +       u_int32_t       count;          /* number of observations */
16139 +};
16140 +
16141 +struct cryptostats {
16142 +       u_int32_t       cs_ops;         /* symmetric crypto ops submitted */
16143 +       u_int32_t       cs_errs;        /* symmetric crypto ops that failed */
16144 +       u_int32_t       cs_kops;        /* asymmetric/key ops submitted */
16145 +       u_int32_t       cs_kerrs;       /* asymmetric/key ops that failed */
16146 +       u_int32_t       cs_intrs;       /* crypto swi thread activations */
16147 +       u_int32_t       cs_rets;        /* crypto return thread activations */
16148 +       u_int32_t       cs_blocks;      /* symmetric op driver block */
16149 +       u_int32_t       cs_kblocks;     /* asymmetric/key op driver block */
16150 +       /*
16151 +        * When CRYPTO_TIMING is defined at compile time and the
16152 +        * sysctl debug.crypto is set to 1, the crypto system will
16153 +        * accumulate statistics about how long it takes to process
16154 +        * crypto requests at various points during processing.
16155 +        */
16156 +       struct cryptotstat cs_invoke;   /* crypto_dispatch -> crypto_invoke */
16157 +       struct cryptotstat cs_done;     /* crypto_invoke -> crypto_done */
16158 +       struct cryptotstat cs_cb;       /* crypto_done -> callback */
16159 +       struct cryptotstat cs_finis;    /* callback -> callback return */
16160 +
16161 +       u_int32_t       cs_drops;               /* crypto ops dropped due to congestion */
16162 +};
16163 +
16164 +#ifdef __KERNEL__
16165 +
16166 +/* Standard initialization structure beginning */
16167 +struct cryptoini {
16168 +       int             cri_alg;        /* Algorithm to use */
16169 +       int             cri_klen;       /* Key length, in bits */
16170 +       int             cri_mlen;       /* Number of bytes we want from the
16171 +                                          entire hash. 0 means all. */
16172 +       caddr_t         cri_key;        /* key to use */
16173 +       u_int8_t        cri_iv[EALG_MAX_BLOCK_LEN];     /* IV to use */
16174 +       struct cryptoini *cri_next;
16175 +};
16176 +
16177 +/* Describe boundaries of a single crypto operation */
16178 +struct cryptodesc {
16179 +       int             crd_skip;       /* How many bytes to ignore from start */
16180 +       int             crd_len;        /* How many bytes to process */
16181 +       int             crd_inject;     /* Where to inject results, if applicable */
16182 +       int             crd_flags;
16183 +
16184 +#define CRD_F_ENCRYPT          0x01    /* Set when doing encryption */
16185 +#define CRD_F_IV_PRESENT       0x02    /* When encrypting, IV is already in
16186 +                                          place, so don't copy. */
16187 +#define CRD_F_IV_EXPLICIT      0x04    /* IV explicitly provided */
16188 +#define CRD_F_DSA_SHA_NEEDED   0x08    /* Compute SHA-1 of buffer for DSA */
16189 +#define CRD_F_KEY_EXPLICIT     0x10    /* Key explicitly provided */
16190 +#define CRD_F_COMP             0x0f    /* Set when doing compression */
16191 +
16192 +       struct cryptoini        CRD_INI; /* Initialization/context data */
16193 +#define crd_iv         CRD_INI.cri_iv
16194 +#define crd_key                CRD_INI.cri_key
16195 +#define crd_alg                CRD_INI.cri_alg
16196 +#define crd_klen       CRD_INI.cri_klen
16197 +
16198 +       struct cryptodesc *crd_next;
16199 +};
16200 +
16201 +/* Structure describing complete operation */
16202 +struct cryptop {
16203 +       struct list_head crp_next;
16204 +       wait_queue_head_t crp_waitq;
16205 +
16206 +       u_int64_t       crp_sid;        /* Session ID */
16207 +       int             crp_ilen;       /* Input data total length */
16208 +       int             crp_olen;       /* Result total length */
16209 +
16210 +       int             crp_etype;      /*
16211 +                                        * Error type (zero means no error).
16212 +                                        * All error codes except EAGAIN
16213 +                                        * indicate possible data corruption (as in,
16214 +                                        * the data have been touched). On all
16215 +                                        * errors, the crp_sid may have changed
16216 +                                        * (reset to a new one), so the caller
16217 +                                        * should always check and use the new
16218 +                                        * value on future requests.
16219 +                                        */
16220 +       int             crp_flags;
16221 +
16222 +#define CRYPTO_F_SKBUF         0x0001  /* Input/output are skbuf chains */
16223 +#define CRYPTO_F_IOV           0x0002  /* Input/output are uio */
16224 +#define CRYPTO_F_REL           0x0004  /* Must return data in same place */
16225 +#define CRYPTO_F_BATCH         0x0008  /* Batch op if possible */
16226 +#define CRYPTO_F_CBIMM         0x0010  /* Do callback immediately */
16227 +#define CRYPTO_F_DONE          0x0020  /* Operation completed */
16228 +#define CRYPTO_F_CBIFSYNC      0x0040  /* Do CBIMM if op is synchronous */
16229 +
16230 +       caddr_t         crp_buf;        /* Data to be processed */
16231 +       caddr_t         crp_opaque;     /* Opaque pointer, passed along */
16232 +       struct cryptodesc *crp_desc;    /* Linked list of processing descriptors */
16233 +
16234 +       int (*crp_callback)(struct cryptop *); /* Callback function */
16235 +};
16236 +
16237 +#define CRYPTO_BUF_CONTIG      0x0
16238 +#define CRYPTO_BUF_IOV         0x1
16239 +#define CRYPTO_BUF_SKBUF               0x2
16240 +
16241 +#define CRYPTO_OP_DECRYPT      0x0
16242 +#define CRYPTO_OP_ENCRYPT      0x1
16243 +
16244 +/*
16245 + * Hints passed to process methods.
16246 + */
16247 +#define CRYPTO_HINT_MORE       0x1     /* more ops coming shortly */
16248 +
16249 +struct cryptkop {
16250 +       struct list_head krp_next;
16251 +       wait_queue_head_t krp_waitq;
16252 +
16253 +       int             krp_flags;
16254 +#define CRYPTO_KF_DONE         0x0001  /* Operation completed */
16255 +#define CRYPTO_KF_CBIMM                0x0002  /* Do callback immediately */
16256 +
16257 +       u_int           krp_op;         /* ie. CRK_MOD_EXP or other */
16258 +       u_int           krp_status;     /* return status */
16259 +       u_short         krp_iparams;    /* # of input parameters */
16260 +       u_short         krp_oparams;    /* # of output parameters */
16261 +       u_int           krp_crid;       /* desired device, etc. */
16262 +       u_int32_t       krp_hid;
16263 +       struct crparam  krp_param[CRK_MAXPARAM];        /* kvm */
16264 +       int             (*krp_callback)(struct cryptkop *);
16265 +};
16266 +
16267 +#include <ocf-compat.h>
16268 +
16269 +/*
16270 + * Session ids are 64 bits.  The lower 32 bits contain a "local id" which
16271 + * is a driver-private session identifier.  The upper 32 bits contain a
16272 + * "hardware id" used by the core crypto code to identify the driver and
16273 + * a copy of the driver's capabilities that can be used by client code to
16274 + * optimize operation.
16275 + */
16276 +#define CRYPTO_SESID2HID(_sid) (((_sid) >> 32) & 0x00ffffff)
16277 +#define CRYPTO_SESID2CAPS(_sid)        (((_sid) >> 32) & 0xff000000)
16278 +#define CRYPTO_SESID2LID(_sid) (((u_int32_t) (_sid)) & 0xffffffff)
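/*
 * Worked example (hypothetical session id): for _sid = 0x0100000200000007ULL
 * the upper word is 0x01000002, so
 *	CRYPTO_SESID2HID(_sid)  == 0x000002    (driver id)
 *	CRYPTO_SESID2CAPS(_sid) == 0x01000000  (CRYPTOCAP_F_HARDWARE)
 *	CRYPTO_SESID2LID(_sid)  == 0x00000007  (driver-private session id)
 */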
16279 +
16280 +extern int crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int hard);
16281 +extern int crypto_freesession(u_int64_t sid);
16282 +#define CRYPTOCAP_F_HARDWARE   CRYPTO_FLAG_HARDWARE
16283 +#define CRYPTOCAP_F_SOFTWARE   CRYPTO_FLAG_SOFTWARE
16284 +#define CRYPTOCAP_F_SYNC       0x04000000      /* operates synchronously */
16285 +extern int32_t crypto_get_driverid(device_t dev, int flags);
16286 +extern int crypto_find_driver(const char *);
16287 +extern device_t crypto_find_device_byhid(int hid);
16288 +extern int crypto_getcaps(int hid);
16289 +extern int crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen,
16290 +           u_int32_t flags);
16291 +extern int crypto_kregister(u_int32_t, int, u_int32_t);
16292 +extern int crypto_unregister(u_int32_t driverid, int alg);
16293 +extern int crypto_unregister_all(u_int32_t driverid);
16294 +extern int crypto_dispatch(struct cryptop *crp);
16295 +extern int crypto_kdispatch(struct cryptkop *);
16296 +#define CRYPTO_SYMQ    0x1
16297 +#define CRYPTO_ASYMQ   0x2
16298 +extern int crypto_unblock(u_int32_t, int);
16299 +extern void crypto_done(struct cryptop *crp);
16300 +extern void crypto_kdone(struct cryptkop *);
16301 +extern int crypto_getfeat(int *);
16302 +
16303 +extern void crypto_freereq(struct cryptop *crp);
16304 +extern struct cryptop *crypto_getreq(int num);
16305 +
16306 +extern  int crypto_usercrypto;      /* userland may do crypto requests */
16307 +extern  int crypto_userasymcrypto;  /* userland may do asym crypto reqs */
16308 +extern  int crypto_devallowsoft;    /* only use hardware crypto */
16309 +
16310 +/*
16311 + * random number support; crypto_unregister_all will also unregister these hooks
16312 + */
16313 +extern int crypto_rregister(u_int32_t driverid,
16314 +               int (*read_random)(void *arg, u_int32_t *buf, int len), void *arg);
16315 +extern int crypto_runregister_all(u_int32_t driverid);
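/*
 * Sketch (illustrative only): a hardware driver with an RNG might hook it
 * into OCF roughly as below.  mydrv_read_random(), my_id and sc are
 * placeholders; the exact meaning of len and of the return value (taken
 * here as 32-bit words, based on the buf type) is an assumption.
 */
static int mydrv_read_random(void *arg, u_int32_t *buf, int len)
{
	int i;

	for (i = 0; i < len; i++)
		buf[i] = 0;		/* would read the hardware RNG here */
	return i;
}

/* in the driver's attach path, after crypto_get_driverid():
 *	crypto_rregister(my_id, mydrv_read_random, sc);
 * crypto_runregister_all(my_id) (or crypto_unregister_all) removes it again.
 */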
16316 +
16317 +/*
16318 + * Crypto-related utility routines used mainly by drivers.
16319 + *
16320 + * XXX these don't really belong here; but for now they're
16321 + *     kept apart from the rest of the system.
16322 + */
16323 +struct uio;
16324 +extern void cuio_copydata(struct uio* uio, int off, int len, caddr_t cp);
16325 +extern void cuio_copyback(struct uio* uio, int off, int len, caddr_t cp);
16326 +extern struct iovec *cuio_getptr(struct uio *uio, int loc, int *off);
16327 +
16328 +extern void crypto_copyback(int flags, caddr_t buf, int off, int size,
16329 +           caddr_t in);
16330 +extern void crypto_copydata(int flags, caddr_t buf, int off, int size,
16331 +           caddr_t out);
16332 +extern int crypto_apply(int flags, caddr_t buf, int off, int len,
16333 +           int (*f)(void *, void *, u_int), void *arg);
16334 +
16335 +#endif /* __KERNEL__ */
16336 +#endif /* _CRYPTO_CRYPTO_H_ */
16337 --- /dev/null   2007-07-04 13:54:27.000000000 +1000
16338 +++ linux/crypto/ocf/ocfnull/ocfnull.c  2007-07-13 16:02:05.000000000 +1000
16339 @@ -0,0 +1,203 @@
16340 +/*
16341 + * An OCF module for determining the cost of crypto versus the cost of
16342 + * IPSec processing outside of OCF.  This module gives us the effect of
16343 + * zero-cost encryption; of course you will need to run it at both ends
16344 + * since it does no crypto at all.
16345 + *
16346 + * Written by David McCullough <david_mccullough@securecomputing.com>
16347 + * Copyright (C) 2006-2007 David McCullough 
16348 + *
16349 + * LICENSE TERMS
16350 + *
16351 + * The free distribution and use of this software in both source and binary
16352 + * form is allowed (with or without changes) provided that:
16353 + *
16354 + *   1. distributions of this source code include the above copyright
16355 + *      notice, this list of conditions and the following disclaimer;
16356 + *
16357 + *   2. distributions in binary form include the above copyright
16358 + *      notice, this list of conditions and the following disclaimer
16359 + *      in the documentation and/or other associated materials;
16360 + *
16361 + *   3. the copyright holder's name is not used to endorse products
16362 + *      built using this software without specific written permission.
16363 + *
16364 + * ALTERNATIVELY, provided that this notice is retained in full, this product
16365 + * may be distributed under the terms of the GNU General Public License (GPL),
16366 + * in which case the provisions of the GPL apply INSTEAD OF those given above.
16367 + *
16368 + * DISCLAIMER
16369 + *
16370 + * This software is provided 'as is' with no explicit or implied warranties
16371 + * in respect of its properties, including, but not limited to, correctness
16372 + * and/or fitness for purpose.
16373 + */
16374 +
16375 +#ifndef AUTOCONF_INCLUDED
16376 +#include <linux/config.h>
16377 +#endif
16378 +#include <linux/module.h>
16379 +#include <linux/init.h>
16380 +#include <linux/list.h>
16381 +#include <linux/slab.h>
16382 +#include <linux/sched.h>
16383 +#include <linux/wait.h>
16384 +#include <linux/crypto.h>
16385 +#include <linux/interrupt.h>
16386 +
16387 +#include <cryptodev.h>
16388 +#include <uio.h>
16389 +
16390 +static int32_t                  null_id = -1;
16391 +static u_int32_t                null_sesnum = 0;
16392 +
16393 +static int null_process(device_t, struct cryptop *, int);
16394 +static int null_newsession(device_t, u_int32_t *, struct cryptoini *);
16395 +static int null_freesession(device_t, u_int64_t);
16396 +
16397 +#define debug ocfnull_debug
16398 +int ocfnull_debug = 0;
16399 +module_param(ocfnull_debug, int, 0644);
16400 +MODULE_PARM_DESC(ocfnull_debug, "Enable debug");
16401 +
16402 +/*
16403 + * dummy device structure
16404 + */
16405 +
16406 +static struct {
16407 +       softc_device_decl       sc_dev;
16408 +} nulldev;
16409 +
16410 +static device_method_t null_methods = {
16411 +       /* crypto device methods */
16412 +       DEVMETHOD(cryptodev_newsession, null_newsession),
16413 +       DEVMETHOD(cryptodev_freesession,null_freesession),
16414 +       DEVMETHOD(cryptodev_process,    null_process),
16415 +};
16416 +
16417 +/*
16418 + * Generate a new software session.
16419 + */
16420 +static int
16421 +null_newsession(device_t arg, u_int32_t *sid, struct cryptoini *cri)
16422 +{
16423 +       dprintk("%s()\n", __FUNCTION__);
16424 +       if (sid == NULL || cri == NULL) {
16425 +               dprintk("%s,%d - EINVAL\n", __FILE__, __LINE__);
16426 +               return EINVAL;
16427 +       }
16428 +
16429 +       if (null_sesnum == 0)
16430 +               null_sesnum++;
16431 +       *sid = null_sesnum++;
16432 +       return 0;
16433 +}
16434 +
16435 +
16436 +/*
16437 + * Free a session.
16438 + */
16439 +static int
16440 +null_freesession(device_t arg, u_int64_t tid)
16441 +{
16442 +       u_int32_t sid = CRYPTO_SESID2LID(tid);
16443 +
16444 +       dprintk("%s()\n", __FUNCTION__);
16445 +       if (sid > null_sesnum) {
16446 +               dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
16447 +               return EINVAL;
16448 +       }
16449 +
16450 +       /* Silently accept and return */
16451 +       if (sid == 0)
16452 +               return 0;
16453 +       return 0;
16454 +}
16455 +
16456 +
16457 +/*
16458 + * Process a request.
16459 + */
16460 +static int
16461 +null_process(device_t arg, struct cryptop *crp, int hint)
16462 +{
16463 +       unsigned int lid;
16464 +
16465 +       dprintk("%s()\n", __FUNCTION__);
16466 +
16467 +       /* Sanity check */
16468 +       if (crp == NULL) {
16469 +               dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
16470 +               return EINVAL;
16471 +       }
16472 +
16473 +       crp->crp_etype = 0;
16474 +
16475 +       if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
16476 +               dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
16477 +               crp->crp_etype = EINVAL;
16478 +               goto done;
16479 +       }
16480 +
16481 +       /*
16482 +        * find the session we are using
16483 +        */
16484 +
16485 +       lid = crp->crp_sid & 0xffffffff;
16486 +       if (lid >= null_sesnum || lid == 0) {
16487 +               crp->crp_etype = ENOENT;
16488 +               dprintk("%s,%d: ENOENT\n", __FILE__, __LINE__);
16489 +               goto done;
16490 +       }
16491 +
16492 +done:
16493 +       crypto_done(crp);
16494 +       return 0;
16495 +}
16496 +
16497 +
16498 +/*
16499 + * our driver startup and shutdown routines
16500 + */
16501 +
16502 +static int
16503 +null_init(void)
16504 +{
16505 +       dprintk("%s(%p)\n", __FUNCTION__, null_init);
16506 +
16507 +       memset(&nulldev, 0, sizeof(nulldev));
16508 +       softc_device_init(&nulldev, "ocfnull", 0, null_methods);
16509 +
16510 +       null_id = crypto_get_driverid(softc_get_device(&nulldev),
16511 +                               CRYPTOCAP_F_HARDWARE);
16512 +       if (null_id < 0)
16513 +               panic("ocfnull: crypto device cannot initialize!");
16514 +
16515 +#define        REGISTER(alg) \
16516 +       crypto_register(null_id,alg,0,0)
16517 +       REGISTER(CRYPTO_DES_CBC);
16518 +       REGISTER(CRYPTO_3DES_CBC);
16519 +       REGISTER(CRYPTO_RIJNDAEL128_CBC);
16520 +       REGISTER(CRYPTO_MD5);
16521 +       REGISTER(CRYPTO_SHA1);
16522 +       REGISTER(CRYPTO_MD5_HMAC);
16523 +       REGISTER(CRYPTO_SHA1_HMAC);
16524 +#undef REGISTER
16525 +
16526 +       return 0;
16527 +}
16528 +
16529 +static void
16530 +null_exit(void)
16531 +{
16532 +       dprintk("%s()\n", __FUNCTION__);
16533 +       crypto_unregister_all(null_id);
16534 +       null_id = -1;
16535 +}
16536 +
16537 +module_init(null_init);
16538 +module_exit(null_exit);
16539 +
16540 +MODULE_LICENSE("Dual BSD/GPL");
16541 +MODULE_AUTHOR("David McCullough <david_mccullough@securecomputing.com>");
16542 +MODULE_DESCRIPTION("ocfnull - claims a lot but does nothing");
16543 --- /dev/null   2007-07-04 13:54:27.000000000 +1000
16544 +++ linux/crypto/ocf/cryptosoft.c       2008-02-14 14:59:01.000000000 +1000
16545 @@ -0,0 +1,898 @@
16546 +/*
16547 + * An OCF module that uses the linux kernel cryptoapi, based on the
16548 + * original cryptosoft for BSD by Angelos D. Keromytis (angelos@cis.upenn.edu)
16549 + * but is mostly unrecognisable.
16550 + *
16551 + * Written by David McCullough <david_mccullough@securecomputing.com>
16552 + * Copyright (C) 2004-2007 David McCullough
16553 + * Copyright (C) 2004-2005 Intel Corporation.
16554 + *
16555 + * LICENSE TERMS
16556 + *
16557 + * The free distribution and use of this software in both source and binary
16558 + * form is allowed (with or without changes) provided that:
16559 + *
16560 + *   1. distributions of this source code include the above copyright
16561 + *      notice, this list of conditions and the following disclaimer;
16562 + *
16563 + *   2. distributions in binary form include the above copyright
16564 + *      notice, this list of conditions and the following disclaimer
16565 + *      in the documentation and/or other associated materials;
16566 + *
16567 + *   3. the copyright holder's name is not used to endorse products
16568 + *      built using this software without specific written permission.
16569 + *
16570 + * ALTERNATIVELY, provided that this notice is retained in full, this product
16571 + * may be distributed under the terms of the GNU General Public License (GPL),
16572 + * in which case the provisions of the GPL apply INSTEAD OF those given above.
16573 + *
16574 + * DISCLAIMER
16575 + *
16576 + * This software is provided 'as is' with no explicit or implied warranties
16577 + * in respect of its properties, including, but not limited to, correctness
16578 + * and/or fitness for purpose.
16579 + * ---------------------------------------------------------------------------
16580 + */
16581 +
16582 +#ifndef AUTOCONF_INCLUDED
16583 +#include <linux/config.h>
16584 +#endif
16585 +#include <linux/module.h>
16586 +#include <linux/init.h>
16587 +#include <linux/list.h>
16588 +#include <linux/slab.h>
16589 +#include <linux/sched.h>
16590 +#include <linux/wait.h>
16591 +#include <linux/crypto.h>
16592 +#include <linux/mm.h>
16593 +#include <linux/skbuff.h>
16594 +#include <linux/random.h>
16595 +#include <asm/scatterlist.h>
16596 +
16597 +#include <cryptodev.h>
16598 +#include <uio.h>
16599 +
16600 +struct {
16601 +       softc_device_decl       sc_dev;
16602 +} swcr_softc;
16603 +
16604 +#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
16605 +
16606 +/* Software session entry */
16607 +
16608 +#define SW_TYPE_CIPHER         0
16609 +#define SW_TYPE_HMAC           1
16610 +#define SW_TYPE_AUTH2          2
16611 +#define SW_TYPE_HASH           3
16612 +#define SW_TYPE_COMP           4
16613 +#define SW_TYPE_BLKCIPHER      5
16614 +
16615 +struct swcr_data {
16616 +       int                                     sw_type;
16617 +       int                                     sw_alg;
16618 +       struct crypto_tfm       *sw_tfm;
16619 +       union {
16620 +               struct {
16621 +                       char *sw_key;
16622 +                       int  sw_klen;
16623 +                       int  sw_mlen;
16624 +               } hmac;
16625 +               void *sw_comp_buf;
16626 +       } u;
16627 +       struct swcr_data        *sw_next;
16628 +};
16629 +
16630 +#ifndef CRYPTO_TFM_MODE_CBC
16631 +/*
16632 + * As of linux-2.6.21 this is no longer defined, and presumably no longer
16633 + * needed to be passed into the crypto core code.
16634 + */
16635 +#define        CRYPTO_TFM_MODE_CBC     0
16636 +#define        CRYPTO_TFM_MODE_ECB     0
16637 +#endif
16638 +
16639 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
16640 +       /*
16641 +        * Linux 2.6.19 introduced a new Crypto API; set up macros to convert the
16642 +        * new API into the old API.
16643 +        */
16644 +
16645 +       /* Symmetric/Block Cipher */
16646 +       struct blkcipher_desc
16647 +       {
16648 +               struct crypto_tfm *tfm;
16649 +               void *info;
16650 +       };
16651 +       #define ecb(X)                                                          #X
16652 +       #define cbc(X)                                                          #X
16653 +       #define crypto_has_blkcipher(X, Y, Z)           crypto_alg_available(X, 0)
16654 +       #define crypto_blkcipher_cast(X)                        X
16655 +       #define crypto_blkcipher_tfm(X)                         X
16656 +       #define crypto_alloc_blkcipher(X, Y, Z)         crypto_alloc_tfm(X, mode)
16657 +       #define crypto_blkcipher_ivsize(X)                      crypto_tfm_alg_ivsize(X)
16658 +       #define crypto_blkcipher_blocksize(X)           crypto_tfm_alg_blocksize(X)
16659 +       #define crypto_blkcipher_setkey(X, Y, Z)        crypto_cipher_setkey(X, Y, Z)
16660 +       #define crypto_blkcipher_encrypt_iv(W, X, Y, Z) \
16661 +                               crypto_cipher_encrypt_iv((W)->tfm, X, Y, Z, (u8 *)((W)->info))
16662 +       #define crypto_blkcipher_decrypt_iv(W, X, Y, Z) \
16663 +                               crypto_cipher_decrypt_iv((W)->tfm, X, Y, Z, (u8 *)((W)->info))
16664 +
16665 +       /* Hash/HMAC/Digest */
16666 +       struct hash_desc
16667 +       {
16668 +               struct crypto_tfm *tfm;
16669 +       };
16670 +       #define hmac(X)                                                 #X
16671 +       #define crypto_has_hash(X, Y, Z)                crypto_alg_available(X, 0)
16672 +       #define crypto_hash_cast(X)                             X
16673 +       #define crypto_hash_tfm(X)                              X
16674 +       #define crypto_alloc_hash(X, Y, Z)              crypto_alloc_tfm(X, mode)
16675 +       #define crypto_hash_digestsize(X)               crypto_tfm_alg_digestsize(X)
16676 +       #define crypto_hash_digest(W, X, Y, Z)  \
16677 +                               crypto_digest_digest((W)->tfm, X, sg_num, Z)
16678 +
16679 +       /* Asymmetric Cipher */
16680 +       #define crypto_has_cipher(X, Y, Z)              crypto_alg_available(X, 0)
16681 +
16682 +       /* Compression */
16683 +       #define crypto_has_comp(X, Y, Z)                crypto_alg_available(X, 0)
16684 +       #define crypto_comp_tfm(X)                              X
16685 +       #define crypto_comp_cast(X)                             X
16686 +       #define crypto_alloc_comp(X, Y, Z)              crypto_alloc_tfm(X, mode)
16687 +#else
16688 +       #define ecb(X)  "ecb(" #X ")"
16689 +       #define cbc(X)  "cbc(" #X ")"
16690 +       #define hmac(X) "hmac(" #X ")"
16691 +#endif /* if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) */
16692 +
16693 +struct crypto_details
16694 +{
16695 +       char *alg_name;
16696 +       int mode;
16697 +       int sw_type;
16698 +};
16699 +
16700 +/*
16701 + * This needs to be kept updated with CRYPTO_xxx list (cryptodev.h).
16702 + * If the Algorithm is not supported, then insert a {NULL, 0, 0} entry.
16703 + *
16704 + * IMPORTANT: The index to the array IS CRYPTO_xxx.
16705 + */
16706 +static struct crypto_details crypto_details[CRYPTO_ALGORITHM_MAX + 1] = {
16707 +       { NULL,              0,                   0 },
16708 +       /* CRYPTO_xxx index starts at 1 */
16709 +       { cbc(des),          CRYPTO_TFM_MODE_CBC, SW_TYPE_BLKCIPHER },
16710 +       { cbc(des3_ede),     CRYPTO_TFM_MODE_CBC, SW_TYPE_BLKCIPHER },
16711 +       { cbc(blowfish),     CRYPTO_TFM_MODE_CBC, SW_TYPE_BLKCIPHER },
16712 +       { cbc(cast5),        CRYPTO_TFM_MODE_CBC, SW_TYPE_BLKCIPHER },
16713 +       { cbc(skipjack),     CRYPTO_TFM_MODE_CBC, SW_TYPE_BLKCIPHER },
16714 +       { hmac(md5),         0,                   SW_TYPE_HMAC },
16715 +       { hmac(sha1),        0,                   SW_TYPE_HMAC },
16716 +       { hmac(ripemd160),   0,                   SW_TYPE_HMAC },
16717 +       { "md5-kpdk??",      0,                   SW_TYPE_HASH },
16718 +       { "sha1-kpdk??",     0,                   SW_TYPE_HASH },
16719 +       { cbc(aes),          CRYPTO_TFM_MODE_CBC, SW_TYPE_BLKCIPHER },
16720 +       { ecb(arc4),         CRYPTO_TFM_MODE_ECB, SW_TYPE_BLKCIPHER },
16721 +       { "md5",             0,                   SW_TYPE_HASH },
16722 +       { "sha1",            0,                   SW_TYPE_HASH },
16723 +       { hmac(digest_null), 0,                   SW_TYPE_HMAC },
16724 +       { cbc(cipher_null),  CRYPTO_TFM_MODE_CBC, SW_TYPE_BLKCIPHER },
16725 +       { "deflate",         0,                   SW_TYPE_COMP },
16726 +       { hmac(sha256),      0,                   SW_TYPE_HMAC },
16727 +       { hmac(sha384),      0,                   SW_TYPE_HMAC },
16728 +       { hmac(sha512),      0,                   SW_TYPE_HMAC },
16729 +       { cbc(camellia),     CRYPTO_TFM_MODE_CBC, SW_TYPE_BLKCIPHER },
16730 +       { "sha256",          0,                   SW_TYPE_HASH },
16731 +       { "sha384",          0,                   SW_TYPE_HASH },
16732 +       { "sha512",          0,                   SW_TYPE_HASH },
16733 +       { "ripemd160",       0,                   SW_TYPE_HASH },
16734 +};
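/*
 * Sketch (illustrative only): because the array index IS the CRYPTO_xxx
 * value, the lookup in swcr_newsession() reduces to the pattern below
 * (placeholder variable names, range checking not shown).
 */
char *algo  = crypto_details[CRYPTO_AES_CBC].alg_name;	/* "cbc(aes)" on 2.6.19+ */
int sw_type = crypto_details[CRYPTO_AES_CBC].sw_type;	/* SW_TYPE_BLKCIPHER */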
16735 +
16736 +int32_t swcr_id = -1;
16737 +module_param(swcr_id, int, 0444);
16738 +MODULE_PARM_DESC(swcr_id, "Read-Only OCF ID for cryptosoft driver");
16739 +
16740 +int swcr_fail_if_compression_grows = 1;
16741 +module_param(swcr_fail_if_compression_grows, int, 0644);
16742 +MODULE_PARM_DESC(swcr_fail_if_compression_grows,
16743 +                "Treat compression that results in more data as a failure");
16744 +
16745 +static struct swcr_data **swcr_sessions = NULL;
16746 +static u_int32_t swcr_sesnum = 0;
16747 +
16748 +static int swcr_process(device_t, struct cryptop *, int);
16749 +static int swcr_newsession(device_t, u_int32_t *, struct cryptoini *);
16750 +static int swcr_freesession(device_t, u_int64_t);
16751 +
16752 +static device_method_t swcr_methods = {
16753 +       /* crypto device methods */
16754 +       DEVMETHOD(cryptodev_newsession, swcr_newsession),
16755 +       DEVMETHOD(cryptodev_freesession,swcr_freesession),
16756 +       DEVMETHOD(cryptodev_process,    swcr_process),
16757 +};
16758 +
16759 +#define debug swcr_debug
16760 +int swcr_debug = 0;
16761 +module_param(swcr_debug, int, 0644);
16762 +MODULE_PARM_DESC(swcr_debug, "Enable debug");
16763 +
16764 +/*
16765 + * Generate a new software session.
16766 + */
16767 +static int
16768 +swcr_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
16769 +{
16770 +       struct swcr_data **swd;
16771 +       u_int32_t i;
16772 +       int error;
16773 +       char *algo;
16774 +       int mode, sw_type;
16775 +
16776 +       dprintk("%s()\n", __FUNCTION__);
16777 +       if (sid == NULL || cri == NULL) {
16778 +               dprintk("%s,%d - EINVAL\n", __FILE__, __LINE__);
16779 +               return EINVAL;
16780 +       }
16781 +
16782 +       if (swcr_sessions) {
16783 +               for (i = 1; i < swcr_sesnum; i++)
16784 +                       if (swcr_sessions[i] == NULL)
16785 +                               break;
16786 +       } else
16787 +               i = 1;          /* NB: to silence compiler warning */
16788 +
16789 +       if (swcr_sessions == NULL || i == swcr_sesnum) {
16790 +               if (swcr_sessions == NULL) {
16791 +                       i = 1; /* We leave swcr_sessions[0] empty */
16792 +                       swcr_sesnum = CRYPTO_SW_SESSIONS;
16793 +               } else
16794 +                       swcr_sesnum *= 2;
16795 +
16796 +               swd = kmalloc(swcr_sesnum * sizeof(struct swcr_data *), SLAB_ATOMIC);
16797 +               if (swd == NULL) {
16798 +                       /* Reset session number */
16799 +                       if (swcr_sesnum == CRYPTO_SW_SESSIONS)
16800 +                               swcr_sesnum = 0;
16801 +                       else
16802 +                               swcr_sesnum /= 2;
16803 +                       dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
16804 +                       return ENOBUFS;
16805 +               }
16806 +               memset(swd, 0, swcr_sesnum * sizeof(struct swcr_data *));
16807 +
16808 +               /* Copy existing sessions */
16809 +               if (swcr_sessions) {
16810 +                       memcpy(swd, swcr_sessions,
16811 +                           (swcr_sesnum / 2) * sizeof(struct swcr_data *));
16812 +                       kfree(swcr_sessions);
16813 +               }
16814 +
16815 +               swcr_sessions = swd;
16816 +       }
16817 +
16818 +       swd = &swcr_sessions[i];
16819 +       *sid = i;
16820 +
16821 +       while (cri) {
16822 +               *swd = (struct swcr_data *) kmalloc(sizeof(struct swcr_data),
16823 +                               SLAB_ATOMIC);
16824 +               if (*swd == NULL) {
16825 +                       swcr_freesession(NULL, i);
16826 +                       dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
16827 +                       return ENOBUFS;
16828 +               }
16829 +               memset(*swd, 0, sizeof(struct swcr_data));
16830 +
16831 +               if (cri->cri_alg > CRYPTO_ALGORITHM_MAX) {
16832 +                       printk("cryptosoft: Unknown algorithm 0x%x\n", cri->cri_alg);
16833 +                       swcr_freesession(NULL, i);
16834 +                       return EINVAL;
16835 +               }
16836 +
16837 +               algo = crypto_details[cri->cri_alg].alg_name;
16838 +               if (!algo || !*algo) {
16839 +                       printk("cryptosoft: Unsupported algorithm 0x%x\n", cri->cri_alg);
16840 +                       swcr_freesession(NULL, i);
16841 +                       return EINVAL;
16842 +               }
16843 +
16844 +               mode = crypto_details[cri->cri_alg].mode;
16845 +               sw_type = crypto_details[cri->cri_alg].sw_type;
16846 +
16847 +               /* Algorithm specific configuration */
16848 +               switch (cri->cri_alg) {
16849 +               case CRYPTO_NULL_CBC:
16850 +                       cri->cri_klen = 0; /* make it work with crypto API */
16851 +                       break;
16852 +               default:
16853 +                       break;
16854 +               }
16855 +
16856 +               if (sw_type == SW_TYPE_BLKCIPHER) {
16857 +                       dprintk("%s crypto_alloc_blkcipher(%s, 0x%x)\n", __FUNCTION__,
16858 +                                       algo, mode);
16859 +
16860 +                       (*swd)->sw_tfm = crypto_blkcipher_tfm(
16861 +                                                               crypto_alloc_blkcipher(algo, 0,
16862 +                                                                       CRYPTO_ALG_ASYNC));
16863 +                       if (!(*swd)->sw_tfm) {
16864 +                               dprintk("cryptosoft: crypto_alloc_blkcipher failed(%s,0x%x)\n",
16865 +                                               algo,mode);
16866 +                               swcr_freesession(NULL, i);
16867 +                               return EINVAL;
16868 +                       }
16869 +
16870 +                       if (debug) {
16871 +                               dprintk("%s key:cri->cri_klen=%d,(cri->cri_klen + 7)/8=%d",
16872 +                                               __FUNCTION__,cri->cri_klen,(cri->cri_klen + 7)/8);
16873 +                               for (i = 0; i < (cri->cri_klen + 7) / 8; i++)
16874 +                               {
16875 +                                       dprintk("%s0x%x", (i % 8) ? " " : "\n    ",cri->cri_key[i]);
16876 +                               }
16877 +                               dprintk("\n");
16878 +                       }
16879 +                       error = crypto_blkcipher_setkey(
16880 +                                               crypto_blkcipher_cast((*swd)->sw_tfm), cri->cri_key,
16881 +                                                       (cri->cri_klen + 7) / 8);
16882 +                       if (error) {
16883 +                               printk("cryptosoft: setkey failed %d (crt_flags=0x%x)\n", error,
16884 +                                               (*swd)->sw_tfm->crt_flags);
16885 +                               swcr_freesession(NULL, i);
16886 +                               return error;
16887 +                       }
16888 +               } else if (sw_type == SW_TYPE_HMAC || sw_type == SW_TYPE_HASH) {
16889 +                       dprintk("%s crypto_alloc_hash(%s, 0x%x)\n", __FUNCTION__,
16890 +                                       algo, mode);
16891 +
16892 +                       (*swd)->sw_tfm = crypto_hash_tfm(
16893 +                                                               crypto_alloc_hash(algo, 0, CRYPTO_ALG_ASYNC));
16894 +
16895 +                       if (!(*swd)->sw_tfm) {
16896 +                               dprintk("cryptosoft: crypto_alloc_hash failed(%s,0x%x)\n",
16897 +                                               algo, mode);
16898 +                               swcr_freesession(NULL, i);
16899 +                               return EINVAL;
16900 +                       }
16901 +
16902 +                       (*swd)->u.hmac.sw_klen = (cri->cri_klen + 7) / 8;
16903 +                       (*swd)->u.hmac.sw_key = (char *)kmalloc((*swd)->u.hmac.sw_klen,
16904 +                               SLAB_ATOMIC);
16905 +                       if ((*swd)->u.hmac.sw_key == NULL) {
16906 +                               swcr_freesession(NULL, i);
16907 +                               dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
16908 +                               return ENOBUFS;
16909 +                       }
16910 +                       memcpy((*swd)->u.hmac.sw_key, cri->cri_key, (*swd)->u.hmac.sw_klen);
16911 +                       if (cri->cri_mlen) {
16912 +                               (*swd)->u.hmac.sw_mlen = cri->cri_mlen;
16913 +                       } else {
16914 +                               (*swd)->u.hmac.sw_mlen =
16915 +                                               crypto_hash_digestsize(
16916 +                                                               crypto_hash_cast((*swd)->sw_tfm));
16917 +                       }
16918 +               } else if (sw_type == SW_TYPE_COMP) {
16919 +                       (*swd)->sw_tfm = crypto_comp_tfm(
16920 +                                       crypto_alloc_comp(algo, 0, CRYPTO_ALG_ASYNC));
16921 +                       if (!(*swd)->sw_tfm) {
16922 +                               dprintk("cryptosoft: crypto_alloc_comp failed(%s,0x%x)\n",
16923 +                                               algo, mode);
16924 +                               swcr_freesession(NULL, i);
16925 +                               return EINVAL;
16926 +                       }
16927 +                       (*swd)->u.sw_comp_buf = kmalloc(CRYPTO_MAX_DATA_LEN, SLAB_ATOMIC);
16928 +                       if ((*swd)->u.sw_comp_buf == NULL) {
16929 +                               swcr_freesession(NULL, i);
16930 +                               dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
16931 +                               return ENOBUFS;
16932 +                       }
16933 +               } else {
16934 +                       printk("cryptosoft: Unhandled sw_type %d\n", sw_type);
16935 +                       swcr_freesession(NULL, i);
16936 +                       return EINVAL;
16937 +               }
16938 +
16939 +               (*swd)->sw_alg = cri->cri_alg;
16940 +               (*swd)->sw_type = sw_type;
16941 +
16942 +               cri = cri->cri_next;
16943 +               swd = &((*swd)->sw_next);
16944 +       }
16945 +       return 0;
16946 +}
16947 +
16948 +/*
16949 + * Free a session.
16950 + */
16951 +static int
16952 +swcr_freesession(device_t dev, u_int64_t tid)
16953 +{
16954 +       struct swcr_data *swd;
16955 +       u_int32_t sid = CRYPTO_SESID2LID(tid);
16956 +
16957 +       dprintk("%s()\n", __FUNCTION__);
16958 +       if (sid >= swcr_sesnum || swcr_sessions == NULL ||
16959 +                       swcr_sessions[sid] == NULL) {
16960 +               dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
16961 +               return(EINVAL);
16962 +       }
16963 +
16964 +       /* Silently accept and return */
16965 +       if (sid == 0)
16966 +               return(0);
16967 +
16968 +       while ((swd = swcr_sessions[sid]) != NULL) {
16969 +               swcr_sessions[sid] = swd->sw_next;
16970 +               if (swd->sw_tfm)
16971 +                       crypto_free_tfm(swd->sw_tfm);
16972 +               if (swd->sw_type == SW_TYPE_COMP) {
16973 +                       if (swd->u.sw_comp_buf)
16974 +                               kfree(swd->u.sw_comp_buf);
16975 +               } else {
16976 +                       if (swd->u.hmac.sw_key)
16977 +                               kfree(swd->u.hmac.sw_key);
16978 +               }
16979 +               kfree(swd);
16980 +       }
16981 +       return 0;
16982 +}
16983 +
16984 +/*
16985 + * Process a software request.
16986 + */
16987 +static int
16988 +swcr_process(device_t dev, struct cryptop *crp, int hint)
16989 +{
16990 +       struct cryptodesc *crd;
16991 +       struct swcr_data *sw;
16992 +       u_int32_t lid;
16993 +#define SCATTERLIST_MAX 16
16994 +       struct scatterlist sg[SCATTERLIST_MAX];
16995 +       int sg_num, sg_len, skip;
16996 +       struct sk_buff *skb = NULL;
16997 +       struct uio *uiop = NULL;
16998 +
16999 +       dprintk("%s()\n", __FUNCTION__);
17000 +       /* Sanity check */
17001 +       if (crp == NULL) {
17002 +               dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
17003 +               return EINVAL;
17004 +       }
17005 +
17006 +       crp->crp_etype = 0;
17007 +
17008 +       if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
17009 +               dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
17010 +               crp->crp_etype = EINVAL;
17011 +               goto done;
17012 +       }
17013 +
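+       /* the low 32 bits of the session id index our local session table */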
17014 +       lid = crp->crp_sid & 0xffffffff;
17015 +       if (lid >= swcr_sesnum || lid == 0 || swcr_sessions == NULL ||
17016 +                       swcr_sessions[lid] == NULL) {
17017 +               crp->crp_etype = ENOENT;
17018 +               dprintk("%s,%d: ENOENT\n", __FILE__, __LINE__);
17019 +               goto done;
17020 +       }
17021 +
17022 +       /*
17023 +        * do some error checking outside of the loop for SKB and IOV processing
17024 +        * this leaves us with valid skb or uiop pointers for later
17025 +        */
17026 +       if (crp->crp_flags & CRYPTO_F_SKBUF) {
17027 +               skb = (struct sk_buff *) crp->crp_buf;
17028 +               if (skb_shinfo(skb)->nr_frags >= SCATTERLIST_MAX) {
17029 +                       printk("%s,%d: %d nr_frags > SCATTERLIST_MAX\n", __FILE__, __LINE__,
17030 +                                       skb_shinfo(skb)->nr_frags);
17031 +                       goto done;
17032 +               }
17033 +       } else if (crp->crp_flags & CRYPTO_F_IOV) {
17034 +               uiop = (struct uio *) crp->crp_buf;
17035 +               if (uiop->uio_iovcnt > SCATTERLIST_MAX) {
17036 +                       printk("%s,%d: %d uio_iovcnt > SCATTERLIST_MAX\n", __FILE__, __LINE__,
17037 +                                       uiop->uio_iovcnt);
17038 +                       goto done;
17039 +               }
17040 +       }
17041 +
17042 +       /* Go through crypto descriptors, processing as we go */
17043 +       for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
17044 +               /*
17045 +                * Find the crypto context.
17046 +                *
17047 +                * XXX Note that the logic here prevents us from having
17048 +                * XXX the same algorithm multiple times in a session
17049 +                * XXX (or rather, we can but it won't give us the right
17050 +                * XXX results). To do that, we'd need some way of differentiating
17051 +                * XXX between the various instances of an algorithm (so we can
17052 +                * XXX locate the correct crypto context).
17053 +                */
17054 +               for (sw = swcr_sessions[lid]; sw && sw->sw_alg != crd->crd_alg;
17055 +                               sw = sw->sw_next)
17056 +                       ;
17057 +
17058 +               /* No such context ? */
17059 +               if (sw == NULL) {
17060 +                       crp->crp_etype = EINVAL;
17061 +                       dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
17062 +                       goto done;
17063 +               }
17064 +
17065 +               skip = crd->crd_skip;
17066 +
17067 +               /*
17068 +                * setup the SG list skip from the start of the buffer
17069 +                */
17070 +               memset(sg, 0, sizeof(sg));
17071 +               if (crp->crp_flags & CRYPTO_F_SKBUF) {
17072 +                       int i, len;
17073 +
17074 +                       sg_num = 0;
17075 +                       sg_len = 0;
17076 +
17077 +                       if (skip < skb_headlen(skb)) {
17078 +                               len = skb_headlen(skb) - skip;
17079 +                               if (len + sg_len > crd->crd_len)
17080 +                                       len = crd->crd_len - sg_len;
17081 +                               sg_set_page(&sg[sg_num],
17082 +                                       virt_to_page(skb->data + skip), len,
17083 +                                       offset_in_page(skb->data + skip));
17084 +                               sg_len += len;
17085 +                               sg_num++;
17086 +                               skip = 0;
17087 +                       } else
17088 +                               skip -= skb_headlen(skb);
17089 +
17090 +                       for (i = 0; sg_len < crd->crd_len &&
17091 +                                               i < skb_shinfo(skb)->nr_frags &&
17092 +                                               sg_num < SCATTERLIST_MAX; i++) {
17093 +                               if (skip < skb_shinfo(skb)->frags[i].size) {
17094 +                                       len = skb_shinfo(skb)->frags[i].size - skip;
17095 +                                       if (len + sg_len > crd->crd_len)
17096 +                                               len = crd->crd_len - sg_len;
17097 +                                       sg_set_page(&sg[sg_num],
17098 +                                               skb_shinfo(skb)->frags[i].page,
17099 +                                               len,
17100 +                                               skb_shinfo(skb)->frags[i].page_offset + skip);
17101 +                                       sg_len += len;
17102 +                                       sg_num++;
17103 +                                       skip = 0;
17104 +                               } else
17105 +                                       skip -= skb_shinfo(skb)->frags[i].size;
17106 +                       }
17107 +               } else if (crp->crp_flags & CRYPTO_F_IOV) {
17108 +                       int len;
17109 +
17110 +                       sg_len = 0;
17111 +                       for (sg_num = 0; sg_len <= crd->crd_len &&
17112 +                                       sg_num < uiop->uio_iovcnt &&
17113 +                                       sg_num < SCATTERLIST_MAX; sg_num++) {
17114 +                               if (skip <= uiop->uio_iov[sg_num].iov_len) {
17115 +                                       len = uiop->uio_iov[sg_num].iov_len - skip;
17116 +                                       if (len + sg_len > crd->crd_len)
17117 +                                               len = crd->crd_len - sg_len;
17118 +                                       sg_set_page(&sg[sg_num],
17119 +                                               virt_to_page(uiop->uio_iov[sg_num].iov_base+skip),
17120 +                                               len,
17121 +                                               offset_in_page(uiop->uio_iov[sg_num].iov_base+skip));
17122 +                                       sg_len += len;
17123 +                                       skip = 0;
17124 +                               } else 
17125 +                                       skip -= uiop->uio_iov[sg_num].iov_len;
17126 +                       }
17127 +               } else {
17128 +                       sg_len = (crp->crp_ilen - skip);
17129 +                       if (sg_len > crd->crd_len)
17130 +                               sg_len = crd->crd_len;
17131 +                       sg_set_page(&sg[0], virt_to_page(crp->crp_buf + skip),
17132 +                               sg_len, offset_in_page(crp->crp_buf + skip));
17133 +                       sg_num = 1;
17134 +               }
17135 +
17136 +
17137 +               switch (sw->sw_type) {
17138 +               case SW_TYPE_BLKCIPHER: {
17139 +                       unsigned char iv[EALG_MAX_BLOCK_LEN];
17140 +                       unsigned char *ivp = iv;
17141 +                       int ivsize = 
17142 +                               crypto_blkcipher_ivsize(crypto_blkcipher_cast(sw->sw_tfm));
17143 +                       struct blkcipher_desc desc;
17144 +
17145 +                       if (sg_len < crypto_blkcipher_blocksize(
17146 +                                       crypto_blkcipher_cast(sw->sw_tfm))) {
17147 +                               crp->crp_etype = EINVAL;
17148 +                               dprintk("%s,%d: EINVAL len %d < %d\n", __FILE__, __LINE__,
17149 +                                               sg_len, crypto_blkcipher_blocksize(
17150 +                                                       crypto_blkcipher_cast(sw->sw_tfm)));
17151 +                               goto done;
17152 +                       }
17153 +
17154 +                       if (ivsize > sizeof(iv)) {
17155 +                               crp->crp_etype = EINVAL;
17156 +                               dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
17157 +                               goto done;
17158 +                       }
17159 +
17160 +                       if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
17161 +                               int i, error;
17162 +
17163 +                               if (debug) {
17164 +                                       dprintk("%s key:", __FUNCTION__);
17165 +                                       for (i = 0; i < (crd->crd_klen + 7) / 8; i++)
17166 +                                               dprintk("%s0x%x", (i % 8) ? " " : "\n    ",
17167 +                                                               crd->crd_key[i]);
17168 +                                       dprintk("\n");
17169 +                               }
17170 +                               error = crypto_blkcipher_setkey(
17171 +                                                       crypto_blkcipher_cast(sw->sw_tfm), crd->crd_key,
17172 +                                                       (crd->crd_klen + 7) / 8);
17173 +                               if (error) {
17174 +                                       dprintk("cryptosoft: setkey failed %d (crt_flags=0x%x)\n",
17175 +                                                       error, sw->sw_tfm->crt_flags);
17176 +                                       crp->crp_etype = -error;
17177 +                               }
17178 +                       }
17179 +
17180 +                       memset(&desc, 0, sizeof(desc));
17181 +                       desc.tfm = crypto_blkcipher_cast(sw->sw_tfm);
17182 +
17183 +                       if (crd->crd_flags & CRD_F_ENCRYPT) { /* encrypt */
17184 +
17185 +                               if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
17186 +                                       ivp = crd->crd_iv;
17187 +                               } else {
17188 +                                       get_random_bytes(ivp, ivsize);
17189 +                               }
17190 +                               /*
17191 +                                * do we have to copy the IV back to the buffer ?
17192 +                                */
17193 +                               if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) {
17194 +                                       crypto_copyback(crp->crp_flags, crp->crp_buf,
17195 +                                                       crd->crd_inject, ivsize, (caddr_t)ivp);
17196 +                               }
17197 +                               desc.info = ivp;
17198 +                               crypto_blkcipher_encrypt_iv(&desc, sg, sg, sg_len);
17199 +
17200 +                       } else { /*decrypt */
17201 +
17202 +                               if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
17203 +                                       ivp = crd->crd_iv;
17204 +                               } else {
17205 +                                       crypto_copydata(crp->crp_flags, crp->crp_buf,
17206 +                                                       crd->crd_inject, ivsize, (caddr_t)ivp);
17207 +                               }
17208 +                               desc.info = ivp;
17209 +                               crypto_blkcipher_decrypt_iv(&desc, sg, sg, sg_len);
17210 +                       }
17211 +                       } break;
17212 +               case SW_TYPE_HMAC:
17213 +               case SW_TYPE_HASH:
17214 +                       {
17215 +                       char result[HASH_MAX_LEN];
17216 +                       struct hash_desc desc;
17217 +
17218 +                       /* check we have room for the result */
17219 +                       if (crp->crp_ilen - crd->crd_inject < sw->u.hmac.sw_mlen) {
17220 +                               dprintk(
17221 +                       "cryptosoft: EINVAL crp_ilen=%d, len=%d, inject=%d digestsize=%d\n",
17222 +                                               crp->crp_ilen, crd->crd_skip + sg_len, crd->crd_inject,
17223 +                                               sw->u.hmac.sw_mlen);
17224 +                               crp->crp_etype = EINVAL;
17225 +                               goto done;
17226 +                       }
17227 +
17228 +                       memset(&desc, 0, sizeof(desc));
17229 +                       desc.tfm = crypto_hash_cast(sw->sw_tfm);
17230 +
17231 +                       memset(result, 0, sizeof(result));
17232 +
17233 +                       if (sw->sw_type == SW_TYPE_HMAC) {
17234 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
17235 +                               crypto_hmac(sw->sw_tfm, sw->u.hmac.sw_key, &sw->u.hmac.sw_klen,
17236 +                                               sg, sg_num, result);
17237 +#else
17238 +                               crypto_hash_setkey(desc.tfm, sw->u.hmac.sw_key,
17239 +                                               sw->u.hmac.sw_klen);
17240 +                               crypto_hash_digest(&desc, sg, sg_len, result);
17241 +#endif /* #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) */
17242 +                               
17243 +                       } else { /* SW_TYPE_HASH */
17244 +                               crypto_hash_digest(&desc, sg, sg_len, result);
17245 +                       }
17246 +
17247 +                       crypto_copyback(crp->crp_flags, crp->crp_buf,
17248 +                                       crd->crd_inject, sw->u.hmac.sw_mlen, result);
17249 +                       }
17250 +                       break;
17251 +
17252 +               case SW_TYPE_COMP: {
17253 +                       void *ibuf = NULL;
17254 +                       void *obuf = sw->u.sw_comp_buf;
17255 +                       int ilen = sg_len, olen = CRYPTO_MAX_DATA_LEN;
17256 +                       int ret = 0;
17257 +
17258 +                       /*
17259 +                        * we need to use an additional copy if there is more than one
17260 +                        * input chunk since the kernel comp routines do not handle
17261 +                        * SG yet.  Otherwise we just use the input buffer as is.
17262 +                        * Rather than allocate another buffer we just split the tmp
17263 +                        * buffer we already have.
17264 +                        * Perhaps we should just use zlib directly ?
17265 +                        */
17266 +                       if (sg_num > 1) {
17267 +                               int blk;
17268 +
17269 +                               ibuf = obuf;
17270 +                               for (blk = 0; blk < sg_num; blk++) {
17271 +                                       memcpy(obuf, sg_virt(&sg[blk]),
17272 +                                                       sg[blk].length);
17273 +                                       obuf += sg[blk].length;
17274 +                               }
17275 +                               olen -= sg_len;
17276 +                       } else
17277 +                               ibuf = sg_virt(&sg[0]);
17278 +
17279 +                       if (crd->crd_flags & CRD_F_ENCRYPT) { /* compress */
17280 +                               ret = crypto_comp_compress(crypto_comp_cast(sw->sw_tfm),
17281 +                                               ibuf, ilen, obuf, &olen);
17282 +                               if (!ret && olen > crd->crd_len) {
17283 +                                       dprintk("cryptosoft: ERANGE compress %d into %d\n",
17284 +                                                       crd->crd_len, olen);
17285 +                                       if (swcr_fail_if_compression_grows)
17286 +                                               ret = ERANGE;
17287 +                               }
17288 +                       } else { /* decompress */
17289 +                               ret = crypto_comp_decompress(crypto_comp_cast(sw->sw_tfm),
17290 +                                               ibuf, ilen, obuf, &olen);
17291 +                               if (!ret && (olen + crd->crd_inject) > crp->crp_olen) {
17292 +                                       dprintk("cryptosoft: ETOOSMALL decompress %d into %d, "
17293 +                                                       "space for %d, at offset %d\n",
17294 +                                                       crd->crd_len, olen, crp->crp_olen, crd->crd_inject);
17295 +                                       ret = ETOOSMALL;
17296 +                               }
17297 +                       }
17298 +                       if (ret)
17299 +                               dprintk("%s,%d: ret = %d\n", __FILE__, __LINE__, ret);
17300 +
17301 +                       /*
17302 +                        * on success copy result back,
17303 +                        * linux crypto API returns -errno, we need to fix that
17304 +                        */
17305 +                       crp->crp_etype = ret < 0 ? -ret : ret;
17306 +                       if (ret == 0) {
17307 +                               /* copy back the result and return its size */
17308 +                               crypto_copyback(crp->crp_flags, crp->crp_buf,
17309 +                                               crd->crd_inject, olen, obuf);
17310 +                               crp->crp_olen = olen;
17311 +                       }
17312 +
17313 +
17314 +                       } break;
17315 +
17316 +               default:
17317 +                       /* Unknown/unsupported algorithm */
17318 +                       dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
17319 +                       crp->crp_etype = EINVAL;
17320 +                       goto done;
17321 +               }
17322 +       }
17323 +
17324 +done:
17325 +       crypto_done(crp);
17326 +       return 0;
17327 +}
17328 +
17329 +static int
17330 +cryptosoft_init(void)
17331 +{
17332 +       int i, sw_type, mode;
17333 +       char *algo;
17334 +
17335 +       dprintk("%s(%p)\n", __FUNCTION__, cryptosoft_init);
17336 +
17337 +       softc_device_init(&swcr_softc, "cryptosoft", 0, swcr_methods);
17338 +
17339 +       swcr_id = crypto_get_driverid(softc_get_device(&swcr_softc),
17340 +                       CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
17341 +       if (swcr_id < 0) {
17342 +               printk("Software crypto device cannot initialize!\n");
17343 +               return -ENODEV;
17344 +       }
17345 +
17346 +#define        REGISTER(alg) \
17347 +               crypto_register(swcr_id, alg, 0,0);
17348 +
17349 +       for (i = CRYPTO_ALGORITHM_MIN; i <= CRYPTO_ALGORITHM_MAX; ++i)
17350 +       {
17351 +               
17352 +               algo = crypto_details[i].alg_name;
17353 +               if (!algo || !*algo)
17354 +               {
17355 +                       dprintk("%s:Algorithm %d not supported\n", __FUNCTION__, i);
17356 +                       continue;
17357 +               }
17358 +
17359 +               mode = crypto_details[i].mode;
17360 +               sw_type = crypto_details[i].sw_type;
17361 +
17362 +               switch (sw_type)
17363 +               {
17364 +                       case SW_TYPE_CIPHER:
17365 +                               if (crypto_has_cipher(algo, 0, CRYPTO_ALG_ASYNC))
17366 +                               {
17367 +                                       REGISTER(i);
17368 +                               }
17369 +                               else
17370 +                               {
17371 +                                       dprintk("%s:CIPHER algorithm %d:'%s' not supported\n",
17372 +                                                               __FUNCTION__, i, algo);
17373 +                               }
17374 +                               break;
17375 +                       case SW_TYPE_HMAC:
17376 +                               if (crypto_has_hash(algo, 0, CRYPTO_ALG_ASYNC))
17377 +                               {
17378 +                                       REGISTER(i);
17379 +                               }
17380 +                               else
17381 +                               {
17382 +                                       dprintk("%s:HMAC algorithm %d:'%s' not supported\n",
17383 +                                                               __FUNCTION__, i, algo);
17384 +                               }
17385 +                               break;
17386 +                       case SW_TYPE_HASH:
17387 +                               if (crypto_has_hash(algo, 0, CRYPTO_ALG_ASYNC))
17388 +                               {
17389 +                                       REGISTER(i);
17390 +                               }
17391 +                               else
17392 +                               {
17393 +                                       dprintk("%s:HASH algorithm %d:'%s' not supported\n",
17394 +                                                               __FUNCTION__, i, algo);
17395 +                               }
17396 +                               break;
17397 +                       case SW_TYPE_COMP:
17398 +                               if (crypto_has_comp(algo, 0, CRYPTO_ALG_ASYNC))
17399 +                               {
17400 +                                       REGISTER(i);
17401 +                               }
17402 +                               else
17403 +                               {
17404 +                                       dprintk("%s:COMP algorithm %d:'%s' not supported\n",
17405 +                                                               __FUNCTION__, i, algo);
17406 +                               }
17407 +                               break;
17408 +                       case SW_TYPE_BLKCIPHER:
17409 +                               if (crypto_has_blkcipher(algo, 0, CRYPTO_ALG_ASYNC))
17410 +                               {
17411 +                                       REGISTER(i);
17412 +                               }
17413 +                               else
17414 +                               {
17415 +                                       dprintk("%s:BLKCIPHER algorithm %d:'%s' not supported\n",
17416 +                                                               __FUNCTION__, i, algo);
17417 +                               }
17418 +                               break;
17419 +                       default:
17420 +                               dprintk(
17421 +                               "%s:Algorithm Type %d not supported (algorithm %d:'%s')\n",
17422 +                                       __FUNCTION__, sw_type, i, algo);
17423 +                               break;
17424 +               }
17425 +       }
17426 +
17427 +       return(0);
17428 +}
17429 +
17430 +static void
17431 +cryptosoft_exit(void)
17432 +{
17433 +       dprintk("%s()\n", __FUNCTION__);
17434 +       crypto_unregister_all(swcr_id);
17435 +       swcr_id = -1;
17436 +}
17437 +
17438 +module_init(cryptosoft_init);
17439 +module_exit(cryptosoft_exit);
17440 +
17441 +MODULE_LICENSE("Dual BSD/GPL");
17442 +MODULE_AUTHOR("David McCullough <david_mccullough@securecomputing.com>");
17443 +MODULE_DESCRIPTION("Cryptosoft (OCF module for kernel crypto)");
17444 --- /dev/null   2007-07-04 13:54:27.000000000 +1000
17445 +++ linux/crypto/ocf/rndtest.c  2007-07-20 13:22:03.000000000 +1000
17446 @@ -0,0 +1,300 @@
17447 +/*     $OpenBSD$       */
17448 +
17449 +/*
17450 + * OCF/Linux port done by David McCullough <david_mccullough@securecomputing.com>
17451 + * Copyright (C) 2006-2007 David McCullough
17452 + * Copyright (C) 2004-2005 Intel Corporation.
17453 + * The license and original author are listed below.
17454 + *
17455 + * Copyright (c) 2002 Jason L. Wright (jason@thought.net)
17456 + * All rights reserved.
17457 + *
17458 + * Redistribution and use in source and binary forms, with or without
17459 + * modification, are permitted provided that the following conditions
17460 + * are met:
17461 + * 1. Redistributions of source code must retain the above copyright
17462 + *    notice, this list of conditions and the following disclaimer.
17463 + * 2. Redistributions in binary form must reproduce the above copyright
17464 + *    notice, this list of conditions and the following disclaimer in the
17465 + *    documentation and/or other materials provided with the distribution.
17466 + * 3. All advertising materials mentioning features or use of this software
17467 + *    must display the following acknowledgement:
17468 + *     This product includes software developed by Jason L. Wright
17469 + * 4. The name of the author may not be used to endorse or promote products
17470 + *    derived from this software without specific prior written permission.
17471 + *
17472 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17473 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17474 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
17475 + * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
17476 + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
17477 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
17478 + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
17479 + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
17480 + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
17481 + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
17482 + * POSSIBILITY OF SUCH DAMAGE.
17483 + */
17484 +
17485 +#ifndef AUTOCONF_INCLUDED
17486 +#include <linux/config.h>
17487 +#endif
17488 +#include <linux/module.h>
17489 +#include <linux/list.h>
17490 +#include <linux/wait.h>
17491 +#include <linux/time.h>
17492 +#include <linux/version.h>
17493 +#include <linux/unistd.h>
17494 +#include <linux/kernel.h>
17495 +#include <linux/string.h>
17496 +#include <linux/time.h>
17497 +#include <cryptodev.h>
17498 +#include "rndtest.h"
17499 +
17500 +static struct rndtest_stats rndstats;
17501 +
17502 +static void rndtest_test(struct rndtest_state *);
17503 +
17504 +/* The tests themselves */
17505 +static int rndtest_monobit(struct rndtest_state *);
17506 +static int rndtest_runs(struct rndtest_state *);
17507 +static int rndtest_longruns(struct rndtest_state *);
17508 +static int rndtest_chi_4(struct rndtest_state *);
17509 +
17510 +static int rndtest_runs_check(struct rndtest_state *, int, int *);
17511 +static void rndtest_runs_record(struct rndtest_state *, int, int *);
17512 +
17513 +static const struct rndtest_testfunc {
17514 +       int (*test)(struct rndtest_state *);
17515 +} rndtest_funcs[] = {
17516 +       { rndtest_monobit },
17517 +       { rndtest_runs },
17518 +       { rndtest_chi_4 },
17519 +       { rndtest_longruns },
17520 +};
17521 +
17522 +#define        RNDTEST_NTESTS  (sizeof(rndtest_funcs)/sizeof(rndtest_funcs[0]))
17523 +
17524 +static void
17525 +rndtest_test(struct rndtest_state *rsp)
17526 +{
17527 +       int i, rv = 0;
17528 +
17529 +       rndstats.rst_tests++;
17530 +       for (i = 0; i < RNDTEST_NTESTS; i++)
17531 +               rv |= (*rndtest_funcs[i].test)(rsp);
17532 +       rsp->rs_discard = (rv != 0);
17533 +}
17534 +
17535 +
17536 +extern int crypto_debug;
17537 +#define rndtest_verbose 2
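+/* report failures unconditionally, passes only when crypto debugging is enabled */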
17538 +#define rndtest_report(rsp, failure, fmt, a...) \
17539 +       { if (failure || crypto_debug) { printk("rng_test: " fmt "\n", a); } else; }
17540 +
17541 +#define        RNDTEST_MONOBIT_MINONES 9725
17542 +#define        RNDTEST_MONOBIT_MAXONES 10275
17543 +
17544 +static int
17545 +rndtest_monobit(struct rndtest_state *rsp)
17546 +{
17547 +       int i, ones = 0, j;
17548 +       u_int8_t r;
17549 +
17550 +       for (i = 0; i < RNDTEST_NBYTES; i++) {
17551 +               r = rsp->rs_buf[i];
17552 +               for (j = 0; j < 8; j++, r <<= 1)
17553 +                       if (r & 0x80)
17554 +                               ones++;
17555 +       }
17556 +       if (ones > RNDTEST_MONOBIT_MINONES &&
17557 +           ones < RNDTEST_MONOBIT_MAXONES) {
17558 +               if (rndtest_verbose > 1)
17559 +                       rndtest_report(rsp, 0, "monobit pass (%d < %d < %d)",
17560 +                           RNDTEST_MONOBIT_MINONES, ones,
17561 +                           RNDTEST_MONOBIT_MAXONES);
17562 +               return (0);
17563 +       } else {
17564 +               if (rndtest_verbose)
17565 +                       rndtest_report(rsp, 1,
17566 +                           "monobit failed (%d ones)", ones);
17567 +               rndstats.rst_monobit++;
17568 +               return (-1);
17569 +       }
17570 +}
17571 +
17572 +#define        RNDTEST_RUNS_NINTERVAL  6
17573 +
17574 +static const struct rndtest_runs_tabs {
17575 +       u_int16_t min, max;
17576 +} rndtest_runs_tab[] = {
17577 +       { 2343, 2657 },
17578 +       { 1135, 1365 },
17579 +       { 542, 708 },
17580 +       { 251, 373 },
17581 +       { 111, 201 },
17582 +       { 111, 201 },
17583 +};
17584 +
17585 +static int
17586 +rndtest_runs(struct rndtest_state *rsp)
17587 +{
17588 +       int i, j, ones, zeros, rv = 0;
17589 +       int onei[RNDTEST_RUNS_NINTERVAL], zeroi[RNDTEST_RUNS_NINTERVAL];
17590 +       u_int8_t c;
17591 +
17592 +       bzero(onei, sizeof(onei));
17593 +       bzero(zeroi, sizeof(zeroi));
17594 +       ones = zeros = 0;
17595 +       for (i = 0; i < RNDTEST_NBYTES; i++) {
17596 +               c = rsp->rs_buf[i];
17597 +               for (j = 0; j < 8; j++, c <<= 1) {
17598 +                       if (c & 0x80) {
17599 +                               ones++;
17600 +                               rndtest_runs_record(rsp, zeros, zeroi);
17601 +                               zeros = 0;
17602 +                       } else {
17603 +                               zeros++;
17604 +                               rndtest_runs_record(rsp, ones, onei);
17605 +                               ones = 0;
17606 +                       }
17607 +               }
17608 +       }
17609 +       rndtest_runs_record(rsp, ones, onei);
17610 +       rndtest_runs_record(rsp, zeros, zeroi);
17611 +
17612 +       rv |= rndtest_runs_check(rsp, 0, zeroi);
17613 +       rv |= rndtest_runs_check(rsp, 1, onei);
17614 +
17615 +       if (rv)
17616 +               rndstats.rst_runs++;
17617 +
17618 +       return (rv);
17619 +}
17620 +
17621 +static void
17622 +rndtest_runs_record(struct rndtest_state *rsp, int len, int *intrv)
17623 +{
17624 +       if (len == 0)
17625 +               return;
17626 +       if (len > RNDTEST_RUNS_NINTERVAL)
17627 +               len = RNDTEST_RUNS_NINTERVAL;
17628 +       len -= 1;
17629 +       intrv[len]++;
17630 +}
17631 +
17632 +static int
17633 +rndtest_runs_check(struct rndtest_state *rsp, int val, int *src)
17634 +{
17635 +       int i, rv = 0;
17636 +
17637 +       for (i = 0; i < RNDTEST_RUNS_NINTERVAL; i++) {
17638 +               if (src[i] < rndtest_runs_tab[i].min ||
17639 +                   src[i] > rndtest_runs_tab[i].max) {
17640 +                       rndtest_report(rsp, 1,
17641 +                           "%s interval %d failed (%d, %d-%d)",
17642 +                           val ? "ones" : "zeros",
17643 +                           i + 1, src[i], rndtest_runs_tab[i].min,
17644 +                           rndtest_runs_tab[i].max);
17645 +                       rv = -1;
17646 +               } else {
17647 +                       rndtest_report(rsp, 0,
17648 +                           "runs pass %s interval %d (%d < %d < %d)",
17649 +                           val ? "ones" : "zeros",
17650 +                           i + 1, rndtest_runs_tab[i].min, src[i],
17651 +                           rndtest_runs_tab[i].max);
17652 +               }
17653 +       }
17654 +       return (rv);
17655 +}
17656 +
17657 +static int
17658 +rndtest_longruns(struct rndtest_state *rsp)
17659 +{
17660 +       int i, j, ones = 0, zeros = 0, maxones = 0, maxzeros = 0;
17661 +       u_int8_t c;
17662 +
17663 +       for (i = 0; i < RNDTEST_NBYTES; i++) {
17664 +               c = rsp->rs_buf[i];
17665 +               for (j = 0; j < 8; j++, c <<= 1) {
17666 +                       if (c & 0x80) {
17667 +                               zeros = 0;
17668 +                               ones++;
17669 +                               if (ones > maxones)
17670 +                                       maxones = ones;
17671 +                       } else {
17672 +                               ones = 0;
17673 +                               zeros++;
17674 +                               if (zeros > maxzeros)
17675 +                                       maxzeros = zeros;
17676 +                       }
17677 +               }
17678 +       }
17679 +
17680 +       if (maxones < 26 && maxzeros < 26) {
17681 +               rndtest_report(rsp, 0, "longruns pass (%d ones, %d zeros)",
17682 +                       maxones, maxzeros);
17683 +               return (0);
17684 +       } else {
17685 +               rndtest_report(rsp, 1, "longruns fail (%d ones, %d zeros)",
17686 +                       maxones, maxzeros);
17687 +               rndstats.rst_longruns++;
17688 +               return (-1);
17689 +       }
17690 +}
17691 +
17692 +/*
17693 + * chi^2 test over 4 bits: this is called the poker test in FIPS 140-2,
17694 + * but it is really the chi^2 test over 4 bits (the poker test as described
17695 + * by Knuth vol 2 is something different, and I take him as authoritative
17696 + * on nomenclature over NIST).
17697 + */
17698 +#define        RNDTEST_CHI4_K  16
17699 +#define        RNDTEST_CHI4_K_MASK     (RNDTEST_CHI4_K - 1)
17700 +
17701 +/*
17702 + * The unnormalized values are used so that we don't have to worry about
17703 + * fractional precision.  The "real" value is found by:
17704 + *     (V - 1562500) * (16 / 5000) = Vn   (where V is the unnormalized value)
17705 + */
17706 +#define        RNDTEST_CHI4_VMIN       1563181         /* 2.1792 */
17707 +#define        RNDTEST_CHI4_VMAX       1576929         /* 46.1728 */
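+/*
+ * Quick check of the bounds above using that formula:
+ * (1563181 - 1562500) * 16 / 5000 = 2.1792 and
+ * (1576929 - 1562500) * 16 / 5000 = 46.1728,
+ * matching the normalised values noted beside the defines.
+ */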
17708 +
17709 +static int
17710 +rndtest_chi_4(struct rndtest_state *rsp)
17711 +{
17712 +       unsigned int freq[RNDTEST_CHI4_K], i, sum;
17713 +
17714 +       for (i = 0; i < RNDTEST_CHI4_K; i++)
17715 +               freq[i] = 0;
17716 +
17717 +       /* Get number of occurrences of each 4-bit pattern */
17718 +       for (i = 0; i < RNDTEST_NBYTES; i++) {
17719 +               freq[(rsp->rs_buf[i] >> 4) & RNDTEST_CHI4_K_MASK]++;
17720 +               freq[(rsp->rs_buf[i] >> 0) & RNDTEST_CHI4_K_MASK]++;
17721 +       }
17722 +
17723 +       for (i = 0, sum = 0; i < RNDTEST_CHI4_K; i++)
17724 +               sum += freq[i] * freq[i];
17725 +
17726 +       if (sum >= RNDTEST_CHI4_VMIN && sum <= RNDTEST_CHI4_VMAX) {
17727 +               rndtest_report(rsp, 0, "chi^2(4): pass (sum %u)", sum);
17728 +               return (0);
17729 +       } else {
17730 +               rndtest_report(rsp, 1, "chi^2(4): failed (sum %u)", sum);
17731 +               rndstats.rst_chi++;
17732 +               return (-1);
17733 +       }
17734 +}
17735 +
17736 +int
17737 +rndtest_buf(unsigned char *buf)
17738 +{
17739 +       struct rndtest_state rsp;
17740 +
17741 +       memset(&rsp, 0, sizeof(rsp));
17742 +       rsp.rs_buf = buf;
17743 +       rndtest_test(&rsp);
17744 +       return(rsp.rs_discard);
17745 +}
17746 +
17747 --- /dev/null   2007-07-04 13:54:27.000000000 +1000
17748 +++ linux/crypto/ocf/rndtest.h  2005-05-20 10:28:26.000000000 +1000
17749 @@ -0,0 +1,54 @@
17750 +/*     $FreeBSD: src/sys/dev/rndtest/rndtest.h,v 1.1 2003/03/11 22:54:44 sam Exp $     */
17751 +/*     $OpenBSD$       */
17752 +
17753 +/*
17754 + * Copyright (c) 2002 Jason L. Wright (jason@thought.net)
17755 + * All rights reserved.
17756 + *
17757 + * Redistribution and use in source and binary forms, with or without
17758 + * modification, are permitted provided that the following conditions
17759 + * are met:
17760 + * 1. Redistributions of source code must retain the above copyright
17761 + *    notice, this list of conditions and the following disclaimer.
17762 + * 2. Redistributions in binary form must reproduce the above copyright
17763 + *    notice, this list of conditions and the following disclaimer in the
17764 + *    documentation and/or other materials provided with the distribution.
17765 + * 3. All advertising materials mentioning features or use of this software
17766 + *    must display the following acknowledgement:
17767 + *     This product includes software developed by Jason L. Wright
17768 + * 4. The name of the author may not be used to endorse or promote products
17769 + *    derived from this software without specific prior written permission.
17770 + *
17771 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17772 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17773 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
17774 + * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
17775 + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
17776 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
17777 + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
17778 + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
17779 + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
17780 + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
17781 + * POSSIBILITY OF SUCH DAMAGE.
17782 + */
17783 +
17784 +
17785 +/* Some of the tests depend on these values */
17786 +#define        RNDTEST_NBYTES  2500
17787 +#define        RNDTEST_NBITS   (8 * RNDTEST_NBYTES)
17788 +
17789 +struct rndtest_state {
17790 +       int             rs_discard;     /* discard/accept random data */
17791 +       u_int8_t        *rs_buf;
17792 +};
17793 +
17794 +struct rndtest_stats {
17795 +       u_int32_t       rst_discard;    /* number of bytes discarded */
17796 +       u_int32_t       rst_tests;      /* number of test runs */
17797 +       u_int32_t       rst_monobit;    /* monobit test failures */
17798 +       u_int32_t       rst_runs;       /* 0/1 runs failures */
17799 +       u_int32_t       rst_longruns;   /* longruns failures */
17800 +       u_int32_t       rst_chi;        /* chi^2 failures */
17801 +};
17802 +
17803 +extern int rndtest_buf(unsigned char *buf);
17804 --- /dev/null   2007-07-04 13:54:27.000000000 +1000
17805 +++ linux/crypto/ocf/ocf-compat.h       2008-04-27 09:30:47.000000000 +1000
17806 @@ -0,0 +1,268 @@
17807 +#ifndef _BSD_COMPAT_H_
17808 +#define _BSD_COMPAT_H_ 1
17809 +/****************************************************************************/
17810 +/*
17811 + * Provide compat routines for older linux kernels and BSD kernels
17812 + *
17813 + * Written by David McCullough <david_mccullough@securecomputing.com>
17814 + * Copyright (C) 2007 David McCullough <david_mccullough@securecomputing.com>
17815 + *
17816 + * LICENSE TERMS
17817 + *
17818 + * The free distribution and use of this software in both source and binary
17819 + * form is allowed (with or without changes) provided that:
17820 + *
17821 + *   1. distributions of this source code include the above copyright
17822 + *      notice, this list of conditions and the following disclaimer;
17823 + *
17824 + *   2. distributions in binary form include the above copyright
17825 + *      notice, this list of conditions and the following disclaimer
17826 + *      in the documentation and/or other associated materials;
17827 + *
17828 + *   3. the copyright holder's name is not used to endorse products
17829 + *      built using this software without specific written permission.
17830 + *
17831 + * ALTERNATIVELY, provided that this notice is retained in full, this file
17832 + * may be distributed under the terms of the GNU General Public License (GPL),
17833 + * in which case the provisions of the GPL apply INSTEAD OF those given above.
17834 + *
17835 + * DISCLAIMER
17836 + *
17837 + * This software is provided 'as is' with no explicit or implied warranties
17838 + * in respect of its properties, including, but not limited to, correctness
17839 + * and/or fitness for purpose.
17840 + */
17841 +/****************************************************************************/
17842 +#ifdef __KERNEL__
17843 +/*
17844 + * fake some BSD driver interface stuff specifically for OCF use
17845 + */
17846 +
17847 +typedef struct ocf_device *device_t;
17848 +
17849 +typedef struct {
17850 +       int (*cryptodev_newsession)(device_t dev, u_int32_t *sidp, struct cryptoini *cri);
17851 +       int (*cryptodev_freesession)(device_t dev, u_int64_t tid);
17852 +       int (*cryptodev_process)(device_t dev, struct cryptop *crp, int hint);
17853 +       int (*cryptodev_kprocess)(device_t dev, struct cryptkop *krp, int hint);
17854 +} device_method_t;
17855 +#define DEVMETHOD(id, func)    id: func
17856 +
17857 +struct ocf_device {
17858 +       char name[32];          /* the driver name */
17859 +       char nameunit[32];      /* the driver name + HW instance */
17860 +       int  unit;
17861 +       device_method_t methods;
17862 +       void *softc;
17863 +};
17864 +
17865 +#define CRYPTODEV_NEWSESSION(dev, sid, cri) \
17866 +       ((*(dev)->methods.cryptodev_newsession)(dev,sid,cri))
17867 +#define CRYPTODEV_FREESESSION(dev, sid) \
17868 +       ((*(dev)->methods.cryptodev_freesession)(dev, sid))
17869 +#define CRYPTODEV_PROCESS(dev, crp, hint) \
17870 +       ((*(dev)->methods.cryptodev_process)(dev, crp, hint))
17871 +#define CRYPTODEV_KPROCESS(dev, krp, hint) \
17872 +       ((*(dev)->methods.cryptodev_kprocess)(dev, krp, hint))
17873 +
17874 +#define device_get_name(dev)   ((dev)->name)
17875 +#define device_get_nameunit(dev)       ((dev)->nameunit)
17876 +#define device_get_unit(dev)   ((dev)->unit)
17877 +#define device_get_softc(dev)  ((dev)->softc)
17878 +
17879 +#define        softc_device_decl \
17880 +               struct ocf_device _device; \
17881 +               device_t
17882 +
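+/*
+ * softc_device_init() is wrapped in "if (1) { ... } else" so that the
+ * macro expands to a single statement and remains safe to use, followed
+ * by a semicolon, inside if/else bodies.
+ */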
17883 +#define        softc_device_init(_sc, _name, _unit, _methods) \
17884 +       if (1) {\
17885 +       strncpy((_sc)->_device.name, _name, sizeof((_sc)->_device.name) - 1); \
17886 +       snprintf((_sc)->_device.nameunit, sizeof((_sc)->_device.name), "%s%d", _name, _unit); \
17887 +       (_sc)->_device.unit = _unit; \
17888 +       (_sc)->_device.methods = _methods; \
17889 +       (_sc)->_device.softc = (void *) _sc; \
17890 +       *(device_t *)((softc_get_device(_sc))+1) = &(_sc)->_device; \
17891 +       } else
17892 +
17893 +#define        softc_get_device(_sc)   (&(_sc)->_device)
17894 +
17895 +/*
17896 + * iomem support for 2.4 and 2.6 kernels
17897 + */
17898 +#include <linux/version.h>
17899 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
17900 +#define ocf_iomem_t    unsigned long
17901 +
17902 +/*
17903 + * implement simple workqueue-like support for older kernels
17904 + */
17905 +
17906 +#include <linux/tqueue.h>
17907 +
17908 +#define work_struct tq_struct
17909 +
17910 +#define INIT_WORK(wp, fp, ap) \
17911 +       do { \
17912 +               (wp)->sync = 0; \
17913 +               (wp)->routine = (fp); \
17914 +               (wp)->data = (ap); \
17915 +       } while (0)
17916 +
17917 +#define schedule_work(wp) \
17918 +       do { \
17919 +               queue_task((wp), &tq_immediate); \
17920 +               mark_bh(IMMEDIATE_BH); \
17921 +       } while (0)
17922 +
17923 +#define flush_scheduled_work() run_task_queue(&tq_immediate)
17924 +
17925 +#else
17926 +#define ocf_iomem_t    void __iomem *
17927 +
17928 +#include <linux/workqueue.h>
17929 +
17930 +#endif
17931 +
17932 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
17933 +#define files_fdtable(files)   (files)
17934 +#endif
17935 +
17936 +#ifdef MODULE_PARM
17937 +#undef module_param    /* just in case */
17938 +#define        module_param(a,b,c)             MODULE_PARM(a,"i")
17939 +#endif
17940 +
17941 +#define bzero(s,l)             memset(s,0,l)
17942 +#define bcopy(s,d,l)   memcpy(d,s,l)
17943 +#define bcmp(x, y, l)  memcmp(x,y,l)
17944 +
17945 +#define MIN(x,y)       ((x) < (y) ? (x) : (y))
17946 +
17947 +#define device_printf(dev, a...) ({ \
17948 +                               printk("%s: ", device_get_nameunit(dev)); printk(a); \
17949 +                       })
17950 +
17951 +#undef printf
17952 +#define printf(fmt...) printk(fmt)
17953 +
17954 +#define KASSERT(c,p)   if (!(c)) { printk p ; } else
17955 +
17956 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
17957 +#define ocf_daemonize(str) \
17958 +       daemonize(); \
17959 +       spin_lock_irq(&current->sigmask_lock); \
17960 +       sigemptyset(&current->blocked); \
17961 +       recalc_sigpending(current); \
17962 +       spin_unlock_irq(&current->sigmask_lock); \
17963 +       sprintf(current->comm, str);
17964 +#else
17965 +#define ocf_daemonize(str) daemonize(str);
17966 +#endif
17967 +
17968 +#define        TAILQ_INSERT_TAIL(q,d,m) list_add_tail(&(d)->m, (q))
17969 +#define        TAILQ_EMPTY(q)  list_empty(q)
17970 +#define        TAILQ_FOREACH(v, q, m) list_for_each_entry(v, q, m)
17971 +
17972 +#define read_random(p,l) get_random_bytes(p,l)
17973 +
17974 +#define DELAY(x)       ((x) > 2000 ? mdelay((x)/1000) : udelay(x))
17975 +#define strtoul simple_strtoul
17976 +
17977 +#define pci_get_vendor(dev)    ((dev)->vendor)
17978 +#define pci_get_device(dev)    ((dev)->device)
17979 +
17980 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
17981 +#define pci_set_consistent_dma_mask(dev, mask) (0)
17982 +#endif
17983 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)
17984 +#define pci_dma_sync_single_for_cpu pci_dma_sync_single
17985 +#endif
17986 +
17987 +#ifndef DMA_32BIT_MASK
17988 +#define DMA_32BIT_MASK  0x00000000ffffffffULL
17989 +#endif
17990 +
17991 +#define htole32(x)     cpu_to_le32(x)
17992 +#define htobe32(x)     cpu_to_be32(x)
17993 +#define htole16(x)     cpu_to_le16(x)
17994 +#define htobe16(x)     cpu_to_be16(x)
17995 +
17996 +/* older kernels don't have these */
17997 +
17998 +#ifndef IRQ_NONE
17999 +#define IRQ_NONE
18000 +#define IRQ_HANDLED
18001 +#define irqreturn_t void
18002 +#endif
18003 +#ifndef IRQF_SHARED
18004 +#define IRQF_SHARED    SA_SHIRQ
18005 +#endif
18006 +
18007 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
18008 +# define strlcpy(dest,src,len) \
18009 +               ({strncpy(dest,src,(len)-1); ((char *)dest)[(len)-1] = '\0'; })
18010 +#endif
18011 +
18012 +#ifndef MAX_ERRNO
18013 +#define MAX_ERRNO      4095
18014 +#endif
18015 +#ifndef IS_ERR_VALUE
18016 +#define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)
18017 +#endif
18018 +
18019 +/*
18020 + * common debug for all
18021 + */
18022 +#if 1
18023 +#define dprintk(a...)  do { if (debug) printk(a); } while(0)
18024 +#else
18025 +#define dprintk(a...)
18026 +#endif
18027 +
18028 +#ifndef SLAB_ATOMIC
18029 +/* Changed in 2.6.20, must use GFP_ATOMIC now */
18030 +#define        SLAB_ATOMIC     GFP_ATOMIC
18031 +#endif
18032 +
18033 +/*
18034 + * need some additional support for older kernels */
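+/*
+ * pci_register_driver_compat() smooths over the different return value
+ * conventions of pci_register_driver(): before 2.6.2 a positive result
+ * is treated as success, zero as "no device" and a negative value makes
+ * us unregister the driver again; before 2.6.10 any non-negative result
+ * is clamped to 0; on newer kernels the value is already 0 or -errno and
+ * is left alone.
+ */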
18035 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,2)
18036 +#define pci_register_driver_compat(driver, rc) \
18037 +       do { \
18038 +               if ((rc) > 0) { \
18039 +                       (rc) = 0; \
18040 +               } else if (rc == 0) { \
18041 +                       (rc) = -ENODEV; \
18042 +               } else { \
18043 +                       pci_unregister_driver(driver); \
18044 +               } \
18045 +       } while (0)
18046 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)
18047 +#define pci_register_driver_compat(driver,rc) ((rc) = (rc) < 0 ? (rc) : 0)
18048 +#else
18049 +#define pci_register_driver_compat(driver,rc)
18050 +#endif
18051 +
18052 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
18053 +
18054 +#include <asm/scatterlist.h>
18055 +
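+/*
+ * Kernels before 2.6.24 store the page pointer directly in struct
+ * scatterlist, so provide the newer sg_set_page()/sg_virt() helpers on
+ * top of the old layout.
+ */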
18056 +static inline void sg_set_page(struct scatterlist *sg,  struct page *page,
18057 +                              unsigned int len, unsigned int offset)
18058 +{
18059 +       sg->page = page;
18060 +       sg->offset = offset;
18061 +       sg->length = len;
18062 +}
18063 +
18064 +static inline void *sg_virt(struct scatterlist *sg)
18065 +{
18066 +       return page_address(sg->page) + sg->offset;
18067 +}
18068 +
18069 +#endif
18070 +
18071 +#endif /* __KERNEL__ */
18072 +
18073 +/****************************************************************************/
18074 +#endif /* _BSD_COMPAT_H_ */
18075 --- /dev/null   2007-07-04 13:54:27.000000000 +1000
18076 +++ linux/crypto/ocf/pasemi/pasemi.c    2007-12-12 11:36:18.000000000 +1000
18077 @@ -0,0 +1,1009 @@
18078 +/*
18079 + * Copyright (C) 2007 PA Semi, Inc
18080 + *
18081 + * Driver for the PA Semi PWRficient DMA Crypto Engine
18082 + *
18083 + * This program is free software; you can redistribute it and/or modify
18084 + * it under the terms of the GNU General Public License version 2 as
18085 + * published by the Free Software Foundation.
18086 + *
18087 + * This program is distributed in the hope that it will be useful,
18088 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
18089 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18090 + * GNU General Public License for more details.
18091 + *
18092 + * You should have received a copy of the GNU General Public License
18093 + * along with this program; if not, write to the Free Software
18094 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
18095 + */
18096 +
18097 +#ifndef AUTOCONF_INCLUDED
18098 +#include <linux/config.h>
18099 +#endif
18100 +#include <linux/module.h>
18101 +#include <linux/init.h>
18102 +#include <linux/interrupt.h>
18103 +#include <linux/timer.h>
18104 +#include <linux/random.h>
18105 +#include <linux/skbuff.h>
18106 +#include <asm/scatterlist.h>
18107 +#include <linux/moduleparam.h>
18108 +#include <linux/pci.h>
18109 +#include <cryptodev.h>
18110 +#include <uio.h>
18111 +#include "pasemi_fnu.h"
18112 +
18113 +#define DRV_NAME "pasemi"
18114 +
18115 +#define TIMER_INTERVAL 1000
18116 +
18117 +static void __devexit pasemi_dma_remove(struct pci_dev *pdev);
18118 +static struct pasdma_status volatile * dma_status;
18119 +
18120 +static int debug;
18121 +module_param(debug, int, 0644);
18122 +MODULE_PARM_DESC(debug, "Enable debug");
18123 +
18124 +static void pasemi_desc_start(struct pasemi_desc *desc, u64 hdr)
18125 +{
18126 +       desc->postop = 0;
18127 +       desc->quad[0] = hdr;
18128 +       desc->quad_cnt = 1;
18129 +       desc->size = 1;
18130 +}
18131 +
18132 +static void pasemi_desc_build(struct pasemi_desc *desc, u64 val)
18133 +{
18134 +       desc->quad[desc->quad_cnt++] = val;
18135 +       desc->size = (desc->quad_cnt + 1) / 2;
18136 +}
18137 +
18138 +static void pasemi_desc_hdr(struct pasemi_desc *desc, u64 hdr)
18139 +{
18140 +       desc->quad[0] |= hdr;
18141 +}
18142 +
18143 +static int pasemi_desc_size(struct pasemi_desc *desc)
18144 +{
18145 +       return desc->size;
18146 +}
18147 +
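+/*
+ * Copy a finished descriptor into the channel's TX ring (two quads per ring
+ * slot) and remember the associated cryptop, if any, so the cleanup path can
+ * complete the request once the engine is done with it.
+ */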
18148 +static void pasemi_ring_add_desc(
18149 +                                struct pasemi_fnu_txring *ring,
18150 +                                struct pasemi_desc *desc,
18151 +                                struct cryptop *crp) {
18152 +       int i;
18153 +       int ring_index = 2 * (ring->next_to_fill & (TX_RING_SIZE-1));
18154 +
18155 +       TX_DESC_INFO(ring, ring->next_to_fill).desc_size = desc->size;
18156 +       TX_DESC_INFO(ring, ring->next_to_fill).desc_postop = desc->postop;
18157 +       TX_DESC_INFO(ring, ring->next_to_fill).cf_crp = crp;
18158 +
18159 +       for (i = 0; i < desc->quad_cnt; i += 2) {
18160 +               ring_index = 2 * (ring->next_to_fill & (TX_RING_SIZE-1));
18161 +               ring->desc[ring_index] = desc->quad[i];
18162 +               ring->desc[ring_index + 1] = desc->quad[i + 1];
18163 +               ring->next_to_fill++;
18164 +       }
18165 +
18166 +       if (desc->quad_cnt & 1)
18167 +               ring->desc[ring_index + 1] = 0;
18168 +}
18169 +
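+/*
+ * pasemi_ring_incr() tells the DMA engine that "incr" new descriptor entries
+ * are available on the given channel by writing to its INCR register.
+ */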
18170 +static void pasemi_ring_incr(struct pasemi_softc *sc, int chan_index, int incr)
18171 +{
18172 +       out_le32(sc->dma_regs + PAS_DMA_TXCHAN_INCR(sc->base_chan + chan_index),
18173 +                incr);
18174 +}
18175 +
18176 +/*
18177 + * Generate a new software session.
18178 + */
18179 +static int
18180 +pasemi_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
18181 +{
18182 +       struct cryptoini *c, *encini = NULL, *macini = NULL;
18183 +       struct pasemi_softc *sc = device_get_softc(dev);
18184 +       struct pasemi_session *ses = NULL, **sespp;
18185 +       int sesn, blksz = 0;
18186 +       u64 ccmd = 0;
18187 +       unsigned long flags;
18188 +       struct pasemi_desc init_desc;
18189 +       struct pasemi_fnu_txring *txring;
18190 +
18191 +       DPRINTF("%s()\n", __FUNCTION__);
18192 +       if (sidp == NULL || cri == NULL || sc == NULL) {
18193 +               DPRINTF("%s,%d - EINVAL\n", __FILE__, __LINE__);
18194 +               return -EINVAL;
18195 +       }
18196 +       for (c = cri; c != NULL; c = c->cri_next) {
18197 +               if (ALG_IS_SIG(c->cri_alg)) {
18198 +                       if (macini)
18199 +                               return -EINVAL;
18200 +                       macini = c;
18201 +               } else if (ALG_IS_CIPHER(c->cri_alg)) {
18202 +                       if (encini)
18203 +                               return -EINVAL;
18204 +                       encini = c;
18205 +               } else {
18206 +                       DPRINTF("UNKNOWN c->cri_alg %d\n", c->cri_alg);
18207 +                       return -EINVAL;
18208 +               }
18209 +       }
18210 +       if (encini == NULL && macini == NULL)
18211 +               return -EINVAL;
18212 +       if (encini) {
18213 +               /* validate key length */
18214 +               switch (encini->cri_alg) {
18215 +               case CRYPTO_DES_CBC:
18216 +                       if (encini->cri_klen != 64)
18217 +                               return -EINVAL;
18218 +                       ccmd = DMA_CALGO_DES;
18219 +                       break;
18220 +               case CRYPTO_3DES_CBC:
18221 +                       if (encini->cri_klen != 192)
18222 +                               return -EINVAL;
18223 +                       ccmd = DMA_CALGO_3DES;
18224 +                       break;
18225 +               case CRYPTO_AES_CBC:
18226 +                       if (encini->cri_klen != 128 &&
18227 +                           encini->cri_klen != 192 &&
18228 +                           encini->cri_klen != 256)
18229 +                               return -EINVAL;
18230 +                       ccmd = DMA_CALGO_AES;
18231 +                       break;
18232 +               case CRYPTO_ARC4:
18233 +                       if (encini->cri_klen != 128)
18234 +                               return -EINVAL;
18235 +                       ccmd = DMA_CALGO_ARC;
18236 +                       break;
18237 +               default:
18238 +                       DPRINTF("UNKNOWN encini->cri_alg %d\n",
18239 +                               encini->cri_alg);
18240 +                       return -EINVAL;
18241 +               }
18242 +       }
18243 +
18244 +       if (macini) {
18245 +               switch (macini->cri_alg) {
18246 +               case CRYPTO_MD5:
18247 +               case CRYPTO_MD5_HMAC:
18248 +                       blksz = 16;
18249 +                       break;
18250 +               case CRYPTO_SHA1:
18251 +               case CRYPTO_SHA1_HMAC:
18252 +                       blksz = 20;
18253 +                       break;
18254 +               default:
18255 +                       DPRINTF("UNKNOWN macini->cri_alg %d\n",
18256 +                               macini->cri_alg);
18257 +                       return -EINVAL;
18258 +               }
18259 +               if (((macini->cri_klen + 7) / 8) > blksz) {
18260 +                       DPRINTF("key length %d bigger than blksize %d not supported\n",
18261 +                               ((macini->cri_klen + 7) / 8), blksz);
18262 +                       return -EINVAL;
18263 +               }
18264 +       }
18265 +
18266 +       for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
18267 +               if (sc->sc_sessions[sesn] == NULL) {
18268 +                       sc->sc_sessions[sesn] = (struct pasemi_session *)
18269 +                               kzalloc(sizeof(struct pasemi_session), GFP_ATOMIC);
18270 +                       ses = sc->sc_sessions[sesn];
18271 +                       break;
18272 +               } else if (sc->sc_sessions[sesn]->used == 0) {
18273 +                       ses = sc->sc_sessions[sesn];
18274 +                       break;
18275 +               }
18276 +       }
18277 +
18278 +       if (ses == NULL) {
18279 +               sespp = (struct pasemi_session **)
18280 +                       kzalloc(sc->sc_nsessions * 2 *
18281 +                               sizeof(struct pasemi_session *), GFP_ATOMIC);
18282 +               if (sespp == NULL)
18283 +                       return -ENOMEM;
18284 +               memcpy(sespp, sc->sc_sessions,
18285 +                      sc->sc_nsessions * sizeof(struct pasemi_session *));
18286 +               kfree(sc->sc_sessions);
18287 +               sc->sc_sessions = sespp;
18288 +               sesn = sc->sc_nsessions;
18289 +               ses = sc->sc_sessions[sesn] = (struct pasemi_session *)
18290 +                       kzalloc(sizeof(struct pasemi_session), GFP_ATOMIC);
18291 +               if (ses == NULL)
18292 +                       return -ENOMEM;
18293 +               sc->sc_nsessions *= 2;
18294 +       }
18295 +
18296 +       ses->used = 1;
18297 +
18298 +       ses->dma_addr = pci_map_single(sc->dma_pdev, (void *) ses->civ,
18299 +                                      sizeof(struct pasemi_session), DMA_TO_DEVICE);
18300 +
18301 +       /* enter the channel scheduler */
18302 +       spin_lock_irqsave(&sc->sc_chnlock, flags);
18303 +
18304 +       /* ARC4 has to be processed by an even-numbered channel */
18305 +       if (encini && (encini->cri_alg == CRYPTO_ARC4))
18306 +               ses->chan = sc->sc_lastchn & ~1;
18307 +       else
18308 +               ses->chan = sc->sc_lastchn;
18309 +       sc->sc_lastchn = (sc->sc_lastchn + 1) % sc->sc_num_channels;
18310 +
18311 +       spin_unlock_irqrestore(&sc->sc_chnlock, flags);
18312 +
18313 +       txring = &sc->tx[ses->chan];
18314 +
18315 +       if (encini) {
18316 +               ses->ccmd = ccmd;
18317 +
18318 +               /* get an IV */
18319 +               /* XXX may read fewer than requested */
18320 +               get_random_bytes(ses->civ, sizeof(ses->civ));
18321 +
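+               /* cri_klen is in bits; keysz ends up as the key length in
+                * 64-bit words minus one, so (keysz + 1) * 8 bytes are copied */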
18322 +               ses->keysz = (encini->cri_klen - 63) / 64;
18323 +               memcpy(ses->key, encini->cri_key, (ses->keysz + 1) * 8);
18324 +
18325 +               pasemi_desc_start(&init_desc,
18326 +                                 XCT_CTRL_HDR(ses->chan, (encini && macini) ? 0x68 : 0x40, DMA_FN_CIV0));
18327 +               pasemi_desc_build(&init_desc,
18328 +                                 XCT_FUN_SRC_PTR((encini && macini) ? 0x68 : 0x40, ses->dma_addr));
18329 +       }
18330 +       if (macini) {
18331 +               if (macini->cri_alg == CRYPTO_MD5_HMAC ||
18332 +                   macini->cri_alg == CRYPTO_SHA1_HMAC)
18333 +                       memcpy(ses->hkey, macini->cri_key, blksz);
18334 +               else {
18335 +                       /* Load initialization constants (RFC 1321, 3174) */
18336 +                       ses->hiv[0] = 0x67452301efcdab89ULL;
18337 +                       ses->hiv[1] = 0x98badcfe10325476ULL;
18338 +                       ses->hiv[2] = 0xc3d2e1f000000000ULL;
18339 +               }
18340 +               ses->hseq = 0ULL;
18341 +       }
18342 +
18343 +       spin_lock_irqsave(&txring->fill_lock, flags);
18344 +
18345 +       if (((txring->next_to_fill + pasemi_desc_size(&init_desc)) -
18346 +            txring->next_to_clean) > TX_RING_SIZE) {
18347 +               spin_unlock_irqrestore(&txring->fill_lock, flags);
18348 +               return ERESTART;
18349 +       }
18350 +
18351 +       if (encini) {
18352 +               pasemi_ring_add_desc(txring, &init_desc, NULL);
18353 +               pasemi_ring_incr(sc, ses->chan,
18354 +                                pasemi_desc_size(&init_desc));
18355 +       }
18356 +
18357 +       txring->sesn = sesn;
18358 +       spin_unlock_irqrestore(&txring->fill_lock, flags);
18359 +
18360 +       *sidp = PASEMI_SID(sesn);
18361 +       return 0;
18362 +}
18363 +
18364 +/*
18365 + * Deallocate a session.
18366 + */
18367 +static int
18368 +pasemi_freesession(device_t dev, u_int64_t tid)
18369 +{
18370 +       struct pasemi_softc *sc = device_get_softc(dev);
18371 +       int session;
18372 +       u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;
18373 +
18374 +       DPRINTF("%s()\n", __FUNCTION__);
18375 +
18376 +       if (sc == NULL)
18377 +               return -EINVAL;
18378 +       session = PASEMI_SESSION(sid);
18379 +       if (session >= sc->sc_nsessions || !sc->sc_sessions[session])
18380 +               return -EINVAL;
18381 +
18382 +       pci_unmap_single(sc->dma_pdev,
18383 +                        sc->sc_sessions[session]->dma_addr,
18384 +                        sizeof(struct pasemi_session), DMA_TO_DEVICE);
18385 +       memset(sc->sc_sessions[session], 0,
18386 +              sizeof(struct pasemi_session));
18387 +
18388 +       return 0;
18389 +}
18390 +
18391 +static int
18392 +pasemi_process(device_t dev, struct cryptop *crp, int hint)
18393 +{
18394 +
18395 +       int err = 0, ivsize, srclen = 0, reinit = 0, reinit_size = 0, chsel;
18396 +       struct pasemi_softc *sc = device_get_softc(dev);
18397 +       struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
18398 +       caddr_t ivp;
18399 +       struct pasemi_desc init_desc, work_desc;
18400 +       struct pasemi_session *ses;
18401 +       struct sk_buff *skb;
18402 +       struct uio *uiop;
18403 +       unsigned long flags;
18404 +       struct pasemi_fnu_txring *txring;
18405 +
18406 +       DPRINTF("%s()\n", __FUNCTION__);
18407 +
18408 +       if (crp == NULL || crp->crp_callback == NULL || sc == NULL)
18409 +               return -EINVAL;
18410 +
18411 +       crp->crp_etype = 0;
18412 +       if (PASEMI_SESSION(crp->crp_sid) >= sc->sc_nsessions)
18413 +               return -EINVAL;
18414 +
18415 +       ses = sc->sc_sessions[PASEMI_SESSION(crp->crp_sid)];
18416 +
18417 +       crd1 = crp->crp_desc;
18418 +       if (crd1 == NULL) {
18419 +               err = -EINVAL;
18420 +               goto errout;
18421 +       }
18422 +       crd2 = crd1->crd_next;
18423 +
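+       /*
+        * Work out which descriptor is the MAC and which is the cipher, and
+        * check that the requested order is one the engine supports:
+        * encrypt-then-authenticate, authenticate-then-decrypt, or a single
+        * operation on its own.
+        */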
18424 +       if (ALG_IS_SIG(crd1->crd_alg)) {
18425 +               maccrd = crd1;
18426 +               if (crd2 == NULL)
18427 +                       enccrd = NULL;
18428 +               else if (ALG_IS_CIPHER(crd2->crd_alg) &&
18429 +                        (crd2->crd_flags & CRD_F_ENCRYPT) == 0)
18430 +                       enccrd = crd2;
18431 +               else
18432 +                       goto erralg;
18433 +       } else if (ALG_IS_CIPHER(crd1->crd_alg)) {
18434 +               enccrd = crd1;
18435 +               if (crd2 == NULL)
18436 +                       maccrd = NULL;
18437 +               else if (ALG_IS_SIG(crd2->crd_alg) &&
18438 +                        (crd1->crd_flags & CRD_F_ENCRYPT))
18439 +                       maccrd = crd2;
18440 +               else
18441 +                       goto erralg;
18442 +       } else
18443 +               goto erralg;
18444 +
18445 +       chsel = ses->chan;
18446 +
18447 +       txring = &sc->tx[chsel];
18448 +
18449 +       if (enccrd && !maccrd) {
18450 +               if (enccrd->crd_alg == CRYPTO_ARC4)
18451 +                       reinit = 1;
18452 +               reinit_size = 0x40;
18453 +               srclen = crp->crp_ilen;
18454 +
18455 +               pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I
18456 +                                 | XCT_FUN_FUN(chsel));
18457 +               if (enccrd->crd_flags & CRD_F_ENCRYPT)
18458 +                       pasemi_desc_hdr(&work_desc, XCT_FUN_CRM_ENC);
18459 +               else
18460 +                       pasemi_desc_hdr(&work_desc, XCT_FUN_CRM_DEC);
18461 +       } else if (enccrd && maccrd) {
18462 +               if (enccrd->crd_alg == CRYPTO_ARC4)
18463 +                       reinit = 1;
18464 +               reinit_size = 0x68;
18465 +
18466 +               if (enccrd->crd_flags & CRD_F_ENCRYPT) {
18467 +                       /* Encrypt -> Authenticate */
18468 +                       pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I | XCT_FUN_CRM_ENC_SIG
18469 +                                         | XCT_FUN_A | XCT_FUN_FUN(chsel));
18470 +                       srclen = maccrd->crd_skip + maccrd->crd_len;
18471 +               } else {
18472 +                       /* Authenticate -> Decrypt */
18473 +                       pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I | XCT_FUN_CRM_SIG_DEC
18474 +                                         | XCT_FUN_24BRES | XCT_FUN_FUN(chsel));
18475 +                       pasemi_desc_build(&work_desc, 0);
18476 +                       pasemi_desc_build(&work_desc, 0);
18477 +                       pasemi_desc_build(&work_desc, 0);
18478 +                       work_desc.postop = PASEMI_CHECK_SIG;
18479 +                       srclen = crp->crp_ilen;
18480 +               }
18481 +
18482 +               pasemi_desc_hdr(&work_desc, XCT_FUN_SHL(maccrd->crd_skip / 4));
18483 +               pasemi_desc_hdr(&work_desc, XCT_FUN_CHL(enccrd->crd_skip - maccrd->crd_skip));
18484 +       } else if (!enccrd && maccrd) {
18485 +               srclen = maccrd->crd_len;
18486 +
18487 +               pasemi_desc_start(&init_desc,
18488 +                                 XCT_CTRL_HDR(chsel, 0x58, DMA_FN_HKEY0));
18489 +               pasemi_desc_build(&init_desc,
18490 +                                 XCT_FUN_SRC_PTR(0x58, ((struct pasemi_session *)ses->dma_addr)->hkey));
18491 +
18492 +               pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I | XCT_FUN_CRM_SIG
18493 +                                 | XCT_FUN_A | XCT_FUN_FUN(chsel));
18494 +       }
18495 +
18496 +       if (enccrd) {
18497 +               switch (enccrd->crd_alg) {
18498 +               case CRYPTO_3DES_CBC:
18499 +                       pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_3DES |
18500 +                                       XCT_FUN_BCM_CBC);
18501 +                       ivsize = sizeof(u64);
18502 +                       break;
18503 +               case CRYPTO_DES_CBC:
18504 +                       pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_DES |
18505 +                                       XCT_FUN_BCM_CBC);
18506 +                       ivsize = sizeof(u64);
18507 +                       break;
18508 +               case CRYPTO_AES_CBC:
18509 +                       pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_AES |
18510 +                                       XCT_FUN_BCM_CBC);
18511 +                       ivsize = 2 * sizeof(u64);
18512 +                       break;
18513 +               case CRYPTO_ARC4:
18514 +                       pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_ARC);
18515 +                       ivsize = 0;
18516 +                       break;
18517 +               default:
18518 +                       printk(DRV_NAME ": unimplemented enccrd->crd_alg %d\n",
18519 +                              enccrd->crd_alg);
18520 +                       err = -EINVAL;
18521 +                       goto errout;
18522 +               }
18523 +
18524 +               ivp = (ivsize == sizeof(u64)) ? (caddr_t) &ses->civ[1] : (caddr_t) &ses->civ[0];
18525 +               if (enccrd->crd_flags & CRD_F_ENCRYPT) {
18526 +                       if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
18527 +                               memcpy(ivp, enccrd->crd_iv, ivsize);
18528 +                       /* If IV is not present in the buffer already, it has to be copied there */
18529 +                       if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0)
18530 +                               crypto_copyback(crp->crp_flags, crp->crp_buf,
18531 +                                               enccrd->crd_inject, ivsize, ivp);
18532 +               } else {
18533 +                       if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
18534 +                               /* IV is provided explicitly in the descriptor */
18535 +                               memcpy(ivp, enccrd->crd_iv, ivsize);
18536 +                       else
18537 +                               /* IV is provided in the packet */
18538 +                               crypto_copydata(crp->crp_flags, crp->crp_buf,
18539 +                                               enccrd->crd_inject, ivsize,
18540 +                                               ivp);
18541 +               }
18542 +       }
18543 +
18544 +       if (maccrd) {
18545 +               switch (maccrd->crd_alg) {
18546 +               case CRYPTO_MD5:
18547 +                       pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_MD5 |
18548 +                                       XCT_FUN_HSZ((crp->crp_ilen - maccrd->crd_inject) / 4));
18549 +                       break;
18550 +               case CRYPTO_SHA1:
18551 +                       pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_SHA1 |
18552 +                                       XCT_FUN_HSZ((crp->crp_ilen - maccrd->crd_inject) / 4));
18553 +                       break;
18554 +               case CRYPTO_MD5_HMAC:
18555 +                       pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_HMAC_MD5 |
18556 +                                       XCT_FUN_HSZ((crp->crp_ilen - maccrd->crd_inject) / 4));
18557 +                       break;
18558 +               case CRYPTO_SHA1_HMAC:
18559 +                       pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_HMAC_SHA1 |
18560 +                                       XCT_FUN_HSZ((crp->crp_ilen - maccrd->crd_inject) / 4));
18561 +                       break;
18562 +               default:
18563 +                       printk(DRV_NAME ": unimplemented maccrd->crd_alg %d\n",
18564 +                              maccrd->crd_alg);
18565 +                       err = -EINVAL;
18566 +                       goto errout;
18567 +               }
18568 +       }
18569 +
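+
+       /*
+        * Map the request buffer for DMA.  Only flat buffers are handled
+        * here: multi-fragment skbs and multi-segment iovecs are rejected,
+        * and anything else is treated as one contiguous buffer.
+        */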
18570 +       if (crp->crp_flags & CRYPTO_F_SKBUF) {
18571 +               /* using SKB buffers */
18572 +               skb = (struct sk_buff *)crp->crp_buf;
18573 +               if (skb_shinfo(skb)->nr_frags) {
18574 +                       printk(DRV_NAME ": skb frags unimplemented\n");
18575 +                       err = -EINVAL;
18576 +                       goto errout;
18577 +               }
18578 +               pasemi_desc_build(
18579 +                       &work_desc,
18580 +                       XCT_FUN_DST_PTR(skb->len, pci_map_single(
18581 +                                               sc->dma_pdev, skb->data,
18582 +                                               skb->len, DMA_TO_DEVICE)));
18583 +               pasemi_desc_build(
18584 +                       &work_desc,
18585 +                       XCT_FUN_SRC_PTR(
18586 +                               srclen, pci_map_single(
18587 +                                       sc->dma_pdev, skb->data,
18588 +                                       srclen, DMA_TO_DEVICE)));
18589 +               pasemi_desc_hdr(&work_desc, XCT_FUN_LLEN(srclen));
18590 +       } else if (crp->crp_flags & CRYPTO_F_IOV) {
18591 +               /* using IOV buffers */
18592 +               uiop = (struct uio *)crp->crp_buf;
18593 +               if (uiop->uio_iovcnt > 1) {
18594 +                       printk(DRV_NAME ": iov frags unimplemented\n");
18595 +                       err = -EINVAL;
18596 +                       goto errout;
18597 +               }
18598 +
18599 +               /* crp_olen is never set; always use crp_ilen */
18600 +               pasemi_desc_build(
18601 +                       &work_desc,
18602 +                       XCT_FUN_DST_PTR(crp->crp_ilen, pci_map_single(
18603 +                                               sc->dma_pdev,
18604 +                                               uiop->uio_iov->iov_base,
18605 +                                               crp->crp_ilen, DMA_TO_DEVICE)));
18606 +               pasemi_desc_hdr(&work_desc, XCT_FUN_LLEN(srclen));
18607 +
18608 +               pasemi_desc_build(
18609 +                       &work_desc,
18610 +                       XCT_FUN_SRC_PTR(srclen, pci_map_single(
18611 +                                               sc->dma_pdev,
18612 +                                               uiop->uio_iov->iov_base,
18613 +                                               srclen, DMA_TO_DEVICE)));
18614 +       } else {
18615 +               /* using contig buffers */
18616 +               pasemi_desc_build(
18617 +                       &work_desc,
18618 +                       XCT_FUN_DST_PTR(crp->crp_ilen, pci_map_single(
18619 +                                               sc->dma_pdev,
18620 +                                               crp->crp_buf,
18621 +                                               crp->crp_ilen, DMA_TO_DEVICE)));
18622 +               pasemi_desc_build(
18623 +                       &work_desc,
18624 +                       XCT_FUN_SRC_PTR(srclen, pci_map_single(
18625 +                                               sc->dma_pdev,
18626 +                                               crp->crp_buf, srclen,
18627 +                                               DMA_TO_DEVICE)));
18628 +               pasemi_desc_hdr(&work_desc, XCT_FUN_LLEN(srclen));
18629 +       }
18630 +
18631 +       spin_lock_irqsave(&txring->fill_lock, flags);
18632 +
18633 +       if (txring->sesn != PASEMI_SESSION(crp->crp_sid)) {
18634 +               txring->sesn = PASEMI_SESSION(crp->crp_sid);
18635 +               reinit = 1;
18636 +       }
18637 +
18638 +       if (enccrd) {
18639 +               pasemi_desc_start(&init_desc,
18640 +                                 XCT_CTRL_HDR(chsel, reinit ? reinit_size : 0x10, DMA_FN_CIV0));
18641 +               pasemi_desc_build(&init_desc,
18642 +                                 XCT_FUN_SRC_PTR(reinit ? reinit_size : 0x10, ses->dma_addr));
18643 +       }
18644 +
18645 +       if (((txring->next_to_fill + pasemi_desc_size(&init_desc) +
18646 +             pasemi_desc_size(&work_desc)) -
18647 +            txring->next_to_clean) > TX_RING_SIZE) {
18648 +               spin_unlock_irqrestore(&txring->fill_lock, flags);
18649 +               err = ERESTART;
18650 +               goto errout;
18651 +       }
18652 +
18653 +       pasemi_ring_add_desc(txring, &init_desc, NULL);
18654 +       pasemi_ring_add_desc(txring, &work_desc, crp);
18655 +
18656 +       pasemi_ring_incr(sc, chsel,
18657 +                        pasemi_desc_size(&init_desc) +
18658 +                        pasemi_desc_size(&work_desc));
18659 +
18660 +       spin_unlock_irqrestore(&txring->fill_lock, flags);
18661 +
18662 +       mod_timer(&txring->crypto_timer, jiffies + TIMER_INTERVAL);
18663 +
18664 +       return 0;
18665 +
18666 +erralg:
18667 +       printk(DRV_NAME ": unsupported algorithm or algorithm order alg1 %d alg2 %d\n",
18668 +              crd1->crd_alg, crd2->crd_alg);
18669 +       err = -EINVAL;
18670 +
18671 +errout:
18672 +       if (err != ERESTART) {
18673 +               crp->crp_etype = err;
18674 +               crypto_done(crp);
18675 +       }
18676 +       return err;
18677 +}
18678 +
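+/*
+ * Reclaim completed descriptors on one channel: the difference between the
+ * packet count reported in the DMA status block and our own total_pktcnt
+ * says how many requests have finished; each one is unmapped, its ring
+ * entries are cleared and crypto_done() is called on the attached cryptop.
+ */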
18679 +static int pasemi_clean_tx(struct pasemi_softc *sc, int chan)
18680 +{
18681 +       int i, j, ring_idx;
18682 +       struct pasemi_fnu_txring *ring = &sc->tx[chan];
18683 +       u16 delta_cnt;
18684 +       unsigned long flags;
18685 +       int loops = 10, desc_size;
18686 +       struct cryptop *crp;
18687 +
18688 +       spin_lock_irqsave(&ring->clean_lock, flags);
18689 +
18690 +       while ((delta_cnt = (dma_status->tx_sta[sc->base_chan + chan]
18691 +                            & PAS_STATUS_PCNT_M) - ring->total_pktcnt)
18692 +              && loops--) {
18693 +
18694 +               for (i = 0; i < delta_cnt; i++) {
18695 +                       desc_size = TX_DESC_INFO(ring, ring->next_to_clean).desc_size;
18696 +                       crp = TX_DESC_INFO(ring, ring->next_to_clean).cf_crp;
18697 +                       if (crp) {
18698 +                               ring_idx = 2 * (ring->next_to_clean & (TX_RING_SIZE-1));
18699 +                               if (TX_DESC_INFO(ring, ring->next_to_clean).desc_postop & PASEMI_CHECK_SIG) {
18700 +                                       /* Make sure the hardware signature check passed;
18701 +                                        * if not, flag an error on the request */
18702 +                                       if (!(ring->desc[ring_idx + 1] & (1ULL << 63)))
18703 +                                               crp->crp_etype = -EINVAL;
18704 +                               }
18705 +                               crypto_done(TX_DESC_INFO(ring,
18706 +                                                        ring->next_to_clean).cf_crp);
18707 +                               TX_DESC_INFO(ring, ring->next_to_clean).cf_crp = NULL;
18708 +                               pci_unmap_single(
18709 +                                       sc->dma_pdev,
18710 +                                       XCT_PTR_ADDR_LEN(ring->desc[ring_idx + 1]),
18711 +                                       PCI_DMA_TODEVICE);
18712 +
18713 +                               ring->desc[ring_idx] = ring->desc[ring_idx + 1] = 0;
18714 +
18715 +                               ring->next_to_clean++;
18716 +                               for (j = 1; j < desc_size; j++) {
18717 +                                       ring_idx = 2 *
18718 +                                               (ring->next_to_clean &
18719 +                                                (TX_RING_SIZE-1));
18720 +                                       pci_unmap_single(
18721 +                                               sc->dma_pdev,
18722 +                                               XCT_PTR_ADDR_LEN(ring->desc[ring_idx]),
18723 +                                               PCI_DMA_TODEVICE);
18724 +                                       if (ring->desc[ring_idx + 1])
18725 +                                               pci_unmap_single(
18726 +                                                       sc->dma_pdev,
18727 +                                                       XCT_PTR_ADDR_LEN(
18728 +                                                               ring->desc[
18729 +                                                                       ring_idx + 1]),
18730 +                                                       PCI_DMA_TODEVICE);
18731 +                                       ring->desc[ring_idx] =
18732 +                                               ring->desc[ring_idx + 1] = 0;
18733 +                                       ring->next_to_clean++;
18734 +                               }
18735 +                       } else {
18736 +                               for (j = 0; j < desc_size; j++) {
18737 +                                       ring_idx = 2 * (ring->next_to_clean & (TX_RING_SIZE-1));
18738 +                                       ring->desc[ring_idx] =
18739 +                                               ring->desc[ring_idx + 1] = 0;
18740 +                                       ring->next_to_clean++;
18741 +                               }
18742 +                       }
18743 +               }
18744 +
18745 +               ring->total_pktcnt += delta_cnt;
18746 +       }
18747 +       spin_unlock_irqrestore(&ring->clean_lock, flags);
18748 +
18749 +       return 0;
18750 +}
18751 +
18752 +static void sweepup_tx(struct pasemi_softc *sc)
18753 +{
18754 +       int i;
18755 +
18756 +       for (i = 0; i < sc->sc_num_channels; i++)
18757 +               pasemi_clean_tx(sc, i);
18758 +}
18759 +
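+/*
+ * Per-channel TX interrupt: clean the ring, then acknowledge the interrupt
+ * by writing the current packet count back to the IOB TXCH reset register.
+ */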
18760 +static irqreturn_t pasemi_intr(int irq, void *arg, struct pt_regs *regs)
18761 +{
18762 +       struct pasemi_softc *sc = arg;
18763 +       unsigned int reg;
18764 +       int chan = irq - sc->base_irq;
18765 +       int chan_index = sc->base_chan + chan;
18766 +       u64 stat = dma_status->tx_sta[chan_index];
18767 +
18768 +       DPRINTF("%s()\n", __FUNCTION__);
18769 +
18770 +       if (!(stat & PAS_STATUS_CAUSE_M))
18771 +               return IRQ_NONE;
18772 +
18773 +       pasemi_clean_tx(sc, chan);
18774 +
18775 +       stat = dma_status->tx_sta[chan_index];
18776 +
18777 +       reg = PAS_IOB_DMA_TXCH_RESET_PINTC |
18778 +               PAS_IOB_DMA_TXCH_RESET_PCNT(sc->tx[chan].total_pktcnt);
18779 +
18780 +       if (stat & PAS_STATUS_SOFT)
18781 +               reg |= PAS_IOB_DMA_TXCH_RESET_SINTC;
18782 +
18783 +       out_le32(sc->iob_regs + PAS_IOB_DMA_TXCH_RESET(chan_index), reg);
18784 +
18785 +
18786 +       return IRQ_HANDLED;
18787 +}
18788 +
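+/*
+ * Bring up one TX (function) channel: allocate the descriptor ring and its
+ * bookkeeping array, program the ring base/size and channel configuration
+ * registers, enable the channel, hook its interrupt and set up the sweep
+ * timer.
+ */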
18789 +static int pasemi_dma_setup_tx_resources(struct pasemi_softc *sc, int chan)
18790 +{
18791 +       u32 val;
18792 +       int chan_index = chan + sc->base_chan;
18793 +       int ret;
18794 +       struct pasemi_fnu_txring *ring;
18795 +
18796 +       ring = &sc->tx[chan];
18797 +
18798 +       spin_lock_init(&ring->fill_lock);
18799 +       spin_lock_init(&ring->clean_lock);
18800 +
18801 +       ring->desc_info = kzalloc(sizeof(struct pasemi_desc_info) *
18802 +                                 TX_RING_SIZE, GFP_KERNEL);
18803 +       if (!ring->desc_info)
18804 +               return -ENOMEM;
18805 +
18806 +       /* Allocate descriptors */
18807 +       ring->desc = dma_alloc_coherent(&sc->dma_pdev->dev,
18808 +                                       TX_RING_SIZE *
18809 +                                       2 * sizeof(u64),
18810 +                                       &ring->dma, GFP_KERNEL);
18811 +       if (!ring->desc)
18812 +               return -ENOMEM;
18813 +
18814 +       memset((void *) ring->desc, 0, TX_RING_SIZE * 2 * sizeof(u64));
18815 +
18816 +       out_le32(sc->iob_regs + PAS_IOB_DMA_TXCH_RESET(chan_index), 0x30);
18817 +
18818 +       ring->total_pktcnt = 0;
18819 +
18820 +       out_le32(sc->dma_regs + PAS_DMA_TXCHAN_BASEL(chan_index),
18821 +                PAS_DMA_TXCHAN_BASEL_BRBL(ring->dma));
18822 +
18823 +       val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->dma >> 32);
18824 +       val |= PAS_DMA_TXCHAN_BASEU_SIZ(TX_RING_SIZE >> 2);
18825 +
18826 +       out_le32(sc->dma_regs + PAS_DMA_TXCHAN_BASEU(chan_index), val);
18827 +
18828 +       out_le32(sc->dma_regs + PAS_DMA_TXCHAN_CFG(chan_index),
18829 +                PAS_DMA_TXCHAN_CFG_TY_FUNC |
18830 +                PAS_DMA_TXCHAN_CFG_TATTR(chan) |
18831 +                PAS_DMA_TXCHAN_CFG_WT(2));
18832 +
18833 +       /* enable tx channel */
18834 +       out_le32(sc->dma_regs +
18835 +                PAS_DMA_TXCHAN_TCMDSTA(chan_index),
18836 +                PAS_DMA_TXCHAN_TCMDSTA_EN);
18837 +
18838 +       out_le32(sc->iob_regs + PAS_IOB_DMA_TXCH_CFG(chan_index),
18839 +                PAS_IOB_DMA_TXCH_CFG_CNTTH(1000));
18840 +
18841 +       ring->next_to_fill = 0;
18842 +       ring->next_to_clean = 0;
18843 +
18844 +       snprintf(ring->irq_name, sizeof(ring->irq_name),
18845 +                "%s%d", "crypto", chan);
18846 +
18847 +       ring->irq = irq_create_mapping(NULL, sc->base_irq + chan);
18848 +       ret = request_irq(ring->irq, (irq_handler_t)
18849 +                         pasemi_intr, IRQF_DISABLED, ring->irq_name, sc);
18850 +       if (ret) {
18851 +               printk(KERN_ERR DRV_NAME ": failed to hook irq %d ret %d\n",
18852 +                      ring->irq, ret);
18853 +               ring->irq = -1;
18854 +               return ret;
18855 +       }
18856 +
18857 +       setup_timer(&ring->crypto_timer, (void *) sweepup_tx, (unsigned long) sc);
18858 +
18859 +       return 0;
18860 +}
18861 +
18862 +static device_method_t pasemi_methods = {
18863 +       /* crypto device methods */
18864 +       DEVMETHOD(cryptodev_newsession,         pasemi_newsession),
18865 +       DEVMETHOD(cryptodev_freesession,        pasemi_freesession),
18866 +       DEVMETHOD(cryptodev_process,            pasemi_process),
18867 +};
18868 +
18869 +/* Set up the crypto device structure, private data,
18870 + * and anything else we need before we start */
18871 +
18872 +static int __devinit
18873 +pasemi_dma_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
18874 +{
18875 +       struct pasemi_softc *sc;
18876 +       int ret, i;
18877 +
18878 +       DPRINTF(KERN_ERR "%s()\n", __FUNCTION__);
18879 +
18880 +       sc = kzalloc(sizeof(*sc), GFP_KERNEL);
18881 +       if (!sc)
18882 +               return -ENOMEM;
18883 +
18884 +       softc_device_init(sc, DRV_NAME, 1, pasemi_methods);
18885 +
18886 +       pci_set_drvdata(pdev, sc);
18887 +
18888 +       spin_lock_init(&sc->sc_chnlock);
18889 +
18890 +       sc->sc_sessions = (struct pasemi_session **)
18891 +               kzalloc(PASEMI_INITIAL_SESSIONS *
18892 +                       sizeof(struct pasemi_session *), GFP_ATOMIC);
18893 +       if (sc->sc_sessions == NULL) {
18894 +               ret = -ENOMEM;
18895 +               goto out;
18896 +       }
18897 +
18898 +       sc->sc_nsessions = PASEMI_INITIAL_SESSIONS;
18899 +       sc->sc_lastchn = 0;
18900 +       sc->base_irq = pdev->irq + 6;
18901 +       sc->base_chan = 6;
18902 +       sc->sc_cid = -1;
18903 +       sc->dma_pdev = pdev;
18904 +
18905 +       sc->iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL);
18906 +       if (!sc->iob_pdev) {
18907 +               dev_err(&pdev->dev, "Can't find I/O Bridge\n");
18908 +               ret = -ENODEV;
18909 +               goto out;
18910 +       }
18911 +
18912 +       /* This is hardcoded and ugly, but some firmware versions don't provide
18913 +        * the register space in the device tree. Luckily the registers are at
18914 +        * well-known locations, so we can just do the math here.
18915 +        */
18916 +       sc->dma_regs =
18917 +               ioremap(0xe0000000 + (sc->dma_pdev->devfn << 12), 0x2000);
18918 +       sc->iob_regs =
18919 +               ioremap(0xe0000000 + (sc->iob_pdev->devfn << 12), 0x2000);
18920 +       if (!sc->dma_regs || !sc->iob_regs) {
18921 +               dev_err(&pdev->dev, "Can't map registers\n");
18922 +               ret = -ENODEV;
18923 +               goto out;
18924 +       }
18925 +
18926 +       dma_status = __ioremap(0xfd800000, 0x1000, 0);
18927 +       if (!dma_status) {
18928 +               ret = -ENODEV;
18929 +               dev_err(&pdev->dev, "Can't map dmastatus space\n");
18930 +               goto out;
18931 +       }
18932 +
18933 +       sc->tx = (struct pasemi_fnu_txring *)
18934 +               kzalloc(sizeof(struct pasemi_fnu_txring)
18935 +                       * 8, GFP_KERNEL);
18936 +       if (!sc->tx) {
18937 +               ret = -ENOMEM;
18938 +               goto out;
18939 +       }
18940 +
18941 +       /* Initialize the h/w */
18942 +       out_le32(sc->dma_regs + PAS_DMA_COM_CFG,
18943 +                (in_le32(sc->dma_regs + PAS_DMA_COM_CFG) |
18944 +                 PAS_DMA_COM_CFG_FWF));
18945 +       out_le32(sc->dma_regs + PAS_DMA_COM_TXCMD, PAS_DMA_COM_TXCMD_EN);
18946 +
18947 +       for (i = 0; i < PASEMI_FNU_CHANNELS; i++) {
18948 +               sc->sc_num_channels++;
18949 +               ret = pasemi_dma_setup_tx_resources(sc, i);
18950 +               if (ret)
18951 +                       goto out;
18952 +       }
18953 +
18954 +       sc->sc_cid = crypto_get_driverid(softc_get_device(sc),
18955 +                                        CRYPTOCAP_F_HARDWARE);
18956 +       if (sc->sc_cid < 0) {
18957 +               printk(KERN_ERR DRV_NAME ": could not get crypto driver id\n");
18958 +               ret = -ENXIO;
18959 +               goto out;
18960 +       }
18961 +
18962 +       /* register algorithms with the framework */
18963 +       printk(DRV_NAME ":");
18964 +
18965 +       crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
18966 +       crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
18967 +       crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
18968 +       crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0);
18969 +       crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0);
18970 +       crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0);
18971 +       crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
18972 +       crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
18973 +
18974 +       return 0;
18975 +
18976 +out:
18977 +       pasemi_dma_remove(pdev);
18978 +       return ret;
18979 +}
18980 +
18981 +#define MAX_RETRIES 5000
18982 +
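+/*
+ * Tear down one TX channel: ask the engine to stop, poll the status register
+ * until the channel goes inactive (up to MAX_RETRIES), then disable it and
+ * release the ring memory, the interrupt and the sweep timer.
+ */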
18983 +static void pasemi_free_tx_resources(struct pasemi_softc *sc, int chan)
18984 +{
18985 +       struct pasemi_fnu_txring *ring = &sc->tx[chan];
18986 +       int chan_index = chan + sc->base_chan;
18987 +       int retries;
18988 +       u32 stat;
18989 +
18990 +       /* Stop the channel */
18991 +       out_le32(sc->dma_regs +
18992 +                PAS_DMA_TXCHAN_TCMDSTA(chan_index),
18993 +                PAS_DMA_TXCHAN_TCMDSTA_ST);
18994 +
18995 +       for (retries = 0; retries < MAX_RETRIES; retries++) {
18996 +               stat = in_le32(sc->dma_regs +
18997 +                              PAS_DMA_TXCHAN_TCMDSTA(chan_index));
18998 +               if (!(stat & PAS_DMA_TXCHAN_TCMDSTA_ACT))
18999 +                       break;
19000 +               cond_resched();
19001 +       }
19002 +
19003 +       if (stat & PAS_DMA_TXCHAN_TCMDSTA_ACT)
19004 +               dev_err(&sc->dma_pdev->dev, "Failed to stop tx channel %d\n",
19005 +                       chan_index);
19006 +
19007 +       /* Disable the channel */
19008 +       out_le32(sc->dma_regs +
19009 +                PAS_DMA_TXCHAN_TCMDSTA(chan_index),
19010 +                0);
19011 +
19012 +       if (ring->desc_info)
19013 +               kfree((void *) ring->desc_info);
19014 +       if (ring->desc)
19015 +               dma_free_coherent(&sc->dma_pdev->dev,
19016 +                                 TX_RING_SIZE *
19017 +                                 2 * sizeof(u64),
19018 +                                 (void *) ring->desc, ring->dma);
19019 +       if (ring->irq != -1)
19020 +               free_irq(ring->irq, sc);
19021 +
19022 +       del_timer(&ring->crypto_timer);
19023 +}
19024 +
19025 +static void __devexit pasemi_dma_remove(struct pci_dev *pdev)
19026 +{
19027 +       struct pasemi_softc *sc = pci_get_drvdata(pdev);
19028 +       int i;
19029 +
19030 +       DPRINTF("%s()\n", __FUNCTION__);
19031 +
19032 +       if (sc->sc_cid >= 0) {
19033 +               crypto_unregister_all(sc->sc_cid);
19034 +       }
19035 +
19036 +       if (sc->tx) {
19037 +               for (i = 0; i < sc->sc_num_channels; i++)
19038 +                       pasemi_free_tx_resources(sc, i);
19039 +
19040 +               kfree(sc->tx);
19041 +       }
19042 +       if (sc->sc_sessions) {
19043 +               for (i = 0; i < sc->sc_nsessions; i++)
19044 +                       kfree(sc->sc_sessions[i]);
19045 +               kfree(sc->sc_sessions);
19046 +       }
19047 +       if (sc->iob_pdev)
19048 +               pci_dev_put(sc->iob_pdev);
19049 +       if (sc->dma_regs)
19050 +               iounmap(sc->dma_regs);
19051 +       if (sc->iob_regs)
19052 +               iounmap(sc->iob_regs);
19053 +       kfree(sc);
19054 +}
19055 +
19056 +static struct pci_device_id pasemi_dma_pci_tbl[] = {
19057 +       { PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa007) },
+       { },    /* terminating entry */
19058 +};
19059 +
19060 +MODULE_DEVICE_TABLE(pci, pasemi_dma_pci_tbl);
19061 +
19062 +static struct pci_driver pasemi_dma_driver = {
19063 +       .name           = "pasemi_dma",
19064 +       .id_table       = pasemi_dma_pci_tbl,
19065 +       .probe          = pasemi_dma_probe,
19066 +       .remove         = __devexit_p(pasemi_dma_remove),
19067 +};
19068 +
19069 +static void __exit pasemi_dma_cleanup_module(void)
19070 +{
19071 +       pci_unregister_driver(&pasemi_dma_driver);
19072 +       __iounmap(dma_status);
19073 +       dma_status = NULL;
19074 +}
19075 +
19076 +int pasemi_dma_init_module(void)
19077 +{
19078 +       return pci_register_driver(&pasemi_dma_driver);
19079 +}
19080 +
19081 +module_init(pasemi_dma_init_module);
19082 +module_exit(pasemi_dma_cleanup_module);
19083 +
19084 +MODULE_LICENSE("Dual BSD/GPL");
19085 +MODULE_AUTHOR("Egor Martovetsky egor@pasemi.com");
19086 +MODULE_DESCRIPTION("OCF driver for PA Semi PWRficient DMA Crypto Engine");
19087 --- /dev/null   2007-07-04 13:54:27.000000000 +1000
19088 +++ linux/crypto/ocf/pasemi/pasemi_fnu.h        2007-12-12 11:36:18.000000000 +1000
19089 @@ -0,0 +1,410 @@
19090 +/*
19091 + * Copyright (C) 2007 PA Semi, Inc
19092 + *
19093 + * Driver for the PA Semi PWRficient DMA Crypto Engine, soft state and
19094 + * hardware register layouts.
19095 + *
19096 + * This program is free software; you can redistribute it and/or modify
19097 + * it under the terms of the GNU General Public License version 2 as
19098 + * published by the Free Software Foundation.
19099 + *
19100 + * This program is distributed in the hope that it will be useful,
19101 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
19102 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
19103 + * GNU General Public License for more details.
19104 + *
19105 + * You should have received a copy of the GNU General Public License
19106 + * along with this program; if not, write to the Free Software
19107 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
19108 + */
19109 +
19110 +#ifndef PASEMI_FNU_H
19111 +#define PASEMI_FNU_H
19112 +
19113 +#include <linux/spinlock.h>
19114 +
19115 +#define        PASEMI_SESSION(sid)     ((sid) & 0xffffffff)
19116 +#define        PASEMI_SID(sesn)        ((sesn) & 0xffffffff)
19117 +#define        DPRINTF(a...)   do { if (debug) printk(DRV_NAME ": " a); } while (0)
19118 +
19119 +/* Must be a power of two */
19120 +#define RX_RING_SIZE 512
19121 +#define TX_RING_SIZE 512
19122 +#define TX_DESC(ring, num)     ((ring)->desc[2 * ((num) & (TX_RING_SIZE-1))])
19123 +#define TX_DESC_INFO(ring, num)        ((ring)->desc_info[(num) & (TX_RING_SIZE-1)])
19124 +#define MAX_DESC_SIZE 8
19125 +#define PASEMI_INITIAL_SESSIONS 10
19126 +#define PASEMI_FNU_CHANNELS 8
19127 +
19128 +/* DMA descriptor */
19129 +struct pasemi_desc {
19130 +       u64 quad[2*MAX_DESC_SIZE];
19131 +       int quad_cnt;
19132 +       int size;
19133 +       int postop;
19134 +};
19135 +
19136 +/*
19137 + * Holds per-descriptor data
19138 + */
19139 +struct pasemi_desc_info {
19140 +       int                     desc_size;
19141 +       int                     desc_postop;
19142 +#define PASEMI_CHECK_SIG 0x1
19143 +
19144 +       struct cryptop          *cf_crp;
19145 +};
19146 +
19147 +/*
19148 + * Holds per-channel data
19149 + */
19150 +struct pasemi_fnu_txring {
19151 +       volatile u64            *desc;
19152 +       volatile struct
19153 +       pasemi_desc_info        *desc_info;
19154 +       dma_addr_t              dma;
19155 +       struct timer_list       crypto_timer;
19156 +       spinlock_t              fill_lock;
19157 +       spinlock_t              clean_lock;
19158 +       unsigned int            next_to_fill;
19159 +       unsigned int            next_to_clean;
19160 +       u16                     total_pktcnt;
19161 +       int                     irq;
19162 +       int                     sesn;
19163 +       char                    irq_name[10];
19164 +};
19165 +
19166 +/*
19167 + * Holds data specific to a single pasemi device.
19168 + */
19169 +struct pasemi_softc {
19170 +       softc_device_decl       sc_cdev;
19171 +       struct pci_dev          *dma_pdev;      /* device backpointer */
19172 +       struct pci_dev          *iob_pdev;      /* device backpointer */
19173 +       void __iomem            *dma_regs;
19174 +       void __iomem            *iob_regs;
19175 +       int                     base_irq;
19176 +       int                     base_chan;
19177 +       int32_t                 sc_cid;         /* crypto tag */
19178 +       int                     sc_nsessions;
19179 +       struct pasemi_session   **sc_sessions;
19180 +       int                     sc_num_channels;/* number of crypto channels */
19181 +
19182 +       /* pointer to the array of txring data structures, one txring per channel */
19183 +       struct pasemi_fnu_txring *tx;
19184 +
19185 +       /*
19186 +        * mutual exclusion for the channel scheduler
19187 +        */
19188 +       spinlock_t              sc_chnlock;
19189 +       /* last channel used, for now use round-robin to allocate channels */
19190 +       int                     sc_lastchn;
19191 +};
19192 +
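+/*
+ * Per-session state.  The leading fields (civ through hiv) are DMA-mapped
+ * and handed to the engine as its per-function context, so their order and
+ * 64-bit packing match what the hardware expects.
+ */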
19193 +struct pasemi_session {
19194 +       u64 civ[2];
19195 +       u64 keysz;
19196 +       u64 key[4];
19197 +       u64 ccmd;
19198 +       u64 hkey[4];
19199 +       u64 hseq;
19200 +       u64 giv[2];
19201 +       u64 hiv[4];
19202 +
19203 +       int used;
19204 +       dma_addr_t      dma_addr;
19205 +       int chan;
19206 +};
19207 +
19208 +/* status register layout in IOB region, at 0xfd800000 */
19209 +struct pasdma_status {
19210 +       u64 rx_sta[64];
19211 +       u64 tx_sta[20];
19212 +};
19213 +
19214 +#define ALG_IS_CIPHER(alg) ((alg == CRYPTO_DES_CBC)            || \
19215 +                               (alg == CRYPTO_3DES_CBC)        || \
19216 +                               (alg == CRYPTO_AES_CBC)         || \
19217 +                               (alg == CRYPTO_ARC4)            || \
19218 +                               (alg == CRYPTO_NULL_CBC))
19219 +
19220 +#define ALG_IS_SIG(alg) ((alg == CRYPTO_MD5)                   || \
19221 +                               (alg == CRYPTO_MD5_HMAC)        || \
19222 +                               (alg == CRYPTO_SHA1)            || \
19223 +                               (alg == CRYPTO_SHA1_HMAC)       || \
19224 +                               (alg == CRYPTO_NULL_HMAC))
19225 +
19226 +enum {
19227 +       PAS_DMA_COM_TXCMD = 0x100,      /* Transmit Command Register  */
19228 +       PAS_DMA_COM_TXSTA = 0x104,      /* Transmit Status Register   */
19229 +       PAS_DMA_COM_RXCMD = 0x108,      /* Receive Command Register   */
19230 +       PAS_DMA_COM_RXSTA = 0x10c,      /* Receive Status Register    */
19231 +       PAS_DMA_COM_CFG   = 0x114,      /* DMA Configuration Register */
19232 +};
19233 +
19234 +/* All these registers live in the PCI configuration space for the DMA PCI
19235 + * device. Use the normal PCI config access functions for them.
19236 + */
19237 +
19238 +#define PAS_DMA_COM_CFG_FWF    0x18000000
19239 +
19240 +#define PAS_DMA_COM_TXCMD_EN   0x00000001 /* enable */
19241 +#define PAS_DMA_COM_TXSTA_ACT  0x00000001 /* active */
19242 +#define PAS_DMA_COM_RXCMD_EN   0x00000001 /* enable */
19243 +#define PAS_DMA_COM_RXSTA_ACT  0x00000001 /* active */
19244 +
19245 +#define _PAS_DMA_TXCHAN_STRIDE 0x20    /* Size per channel             */
19246 +#define _PAS_DMA_TXCHAN_TCMDSTA        0x300   /* Command / Status             */
19247 +#define _PAS_DMA_TXCHAN_CFG    0x304   /* Configuration                */
19248 +#define _PAS_DMA_TXCHAN_DSCRBU 0x308   /* Descriptor BU Allocation     */
19249 +#define _PAS_DMA_TXCHAN_INCR   0x310   /* Descriptor increment         */
19250 +#define _PAS_DMA_TXCHAN_CNT    0x314   /* Descriptor count/offset      */
19251 +#define _PAS_DMA_TXCHAN_BASEL  0x318   /* Descriptor ring base (low)   */
19252 +#define _PAS_DMA_TXCHAN_BASEU  0x31c   /*                      (high)  */
19253 +#define PAS_DMA_TXCHAN_TCMDSTA(c) (0x300+(c)*_PAS_DMA_TXCHAN_STRIDE)
19254 +#define    PAS_DMA_TXCHAN_TCMDSTA_EN   0x00000001      /* Enabled */
19255 +#define    PAS_DMA_TXCHAN_TCMDSTA_ST   0x00000002      /* Stop interface */
19256 +#define    PAS_DMA_TXCHAN_TCMDSTA_ACT  0x00010000      /* Active */
19257 +#define PAS_DMA_TXCHAN_CFG(c)     (0x304+(c)*_PAS_DMA_TXCHAN_STRIDE)
19258 +#define    PAS_DMA_TXCHAN_CFG_TY_FUNC  0x00000002      /* Type = function */
19259 +#define    PAS_DMA_TXCHAN_CFG_TY_IFACE 0x00000000      /* Type = interface */
19260 +#define    PAS_DMA_TXCHAN_CFG_TATTR_M  0x0000003c
19261 +#define    PAS_DMA_TXCHAN_CFG_TATTR_S  2
19262 +#define    PAS_DMA_TXCHAN_CFG_TATTR(x) (((x) << PAS_DMA_TXCHAN_CFG_TATTR_S) & \
19263 +                                        PAS_DMA_TXCHAN_CFG_TATTR_M)
19264 +#define    PAS_DMA_TXCHAN_CFG_WT_M     0x000001c0
19265 +#define    PAS_DMA_TXCHAN_CFG_WT_S     6
19266 +#define    PAS_DMA_TXCHAN_CFG_WT(x)    (((x) << PAS_DMA_TXCHAN_CFG_WT_S) & \
19267 +                                        PAS_DMA_TXCHAN_CFG_WT_M)
19268 +#define    PAS_DMA_TXCHAN_CFG_LPSQ_FAST        0x00000400
19269 +#define    PAS_DMA_TXCHAN_CFG_LPDQ_FAST        0x00000800
19270 +#define    PAS_DMA_TXCHAN_CFG_CF       0x00001000      /* Clean first line */
19271 +#define    PAS_DMA_TXCHAN_CFG_CL       0x00002000      /* Clean last line */
19272 +#define    PAS_DMA_TXCHAN_CFG_UP       0x00004000      /* update tx descr when sent */
19273 +#define PAS_DMA_TXCHAN_INCR(c)    (0x310+(c)*_PAS_DMA_TXCHAN_STRIDE)
19274 +#define PAS_DMA_TXCHAN_BASEL(c)   (0x318+(c)*_PAS_DMA_TXCHAN_STRIDE)
19275 +#define    PAS_DMA_TXCHAN_BASEL_BRBL_M 0xffffffc0
19276 +#define    PAS_DMA_TXCHAN_BASEL_BRBL_S 0
19277 +#define    PAS_DMA_TXCHAN_BASEL_BRBL(x)        (((x) << PAS_DMA_TXCHAN_BASEL_BRBL_S) & \
19278 +                                        PAS_DMA_TXCHAN_BASEL_BRBL_M)
19279 +#define PAS_DMA_TXCHAN_BASEU(c)   (0x31c+(c)*_PAS_DMA_TXCHAN_STRIDE)
19280 +#define    PAS_DMA_TXCHAN_BASEU_BRBH_M 0x00000fff
19281 +#define    PAS_DMA_TXCHAN_BASEU_BRBH_S 0
19282 +#define    PAS_DMA_TXCHAN_BASEU_BRBH(x)        (((x) << PAS_DMA_TXCHAN_BASEU_BRBH_S) & \
19283 +                                        PAS_DMA_TXCHAN_BASEU_BRBH_M)
19284 +/* # of cache lines worth of buffer ring */
19285 +#define    PAS_DMA_TXCHAN_BASEU_SIZ_M  0x3fff0000
19286 +#define    PAS_DMA_TXCHAN_BASEU_SIZ_S  16              /* 0 = 16K */
19287 +#define    PAS_DMA_TXCHAN_BASEU_SIZ(x) (((x) << PAS_DMA_TXCHAN_BASEU_SIZ_S) & \
19288 +                                        PAS_DMA_TXCHAN_BASEU_SIZ_M)
19289 +
19290 +#define    PAS_STATUS_PCNT_M           0x000000000000ffffull
19291 +#define    PAS_STATUS_PCNT_S           0
19292 +#define    PAS_STATUS_DCNT_M           0x00000000ffff0000ull
19293 +#define    PAS_STATUS_DCNT_S           16
19294 +#define    PAS_STATUS_BPCNT_M          0x0000ffff00000000ull
19295 +#define    PAS_STATUS_BPCNT_S          32
19296 +#define    PAS_STATUS_CAUSE_M          0xf000000000000000ull
19297 +#define    PAS_STATUS_TIMER            0x1000000000000000ull
19298 +#define    PAS_STATUS_ERROR            0x2000000000000000ull
19299 +#define    PAS_STATUS_SOFT             0x4000000000000000ull
19300 +#define    PAS_STATUS_INT              0x8000000000000000ull
19301 +
19302 +#define PAS_IOB_DMA_RXCH_CFG(i)                (0x1100 + (i)*4)
19303 +#define    PAS_IOB_DMA_RXCH_CFG_CNTTH_M                0x00000fff
19304 +#define    PAS_IOB_DMA_RXCH_CFG_CNTTH_S                0
19305 +#define    PAS_IOB_DMA_RXCH_CFG_CNTTH(x)       (((x) << PAS_IOB_DMA_RXCH_CFG_CNTTH_S) & \
19306 +                                                PAS_IOB_DMA_RXCH_CFG_CNTTH_M)
19307 +#define PAS_IOB_DMA_TXCH_CFG(i)                (0x1200 + (i)*4)
19308 +#define    PAS_IOB_DMA_TXCH_CFG_CNTTH_M                0x00000fff
19309 +#define    PAS_IOB_DMA_TXCH_CFG_CNTTH_S                0
19310 +#define    PAS_IOB_DMA_TXCH_CFG_CNTTH(x)       (((x) << PAS_IOB_DMA_TXCH_CFG_CNTTH_S) & \
19311 +                                                PAS_IOB_DMA_TXCH_CFG_CNTTH_M)
19312 +#define PAS_IOB_DMA_RXCH_STAT(i)       (0x1300 + (i)*4)
19313 +#define    PAS_IOB_DMA_RXCH_STAT_INTGEN        0x00001000
19314 +#define    PAS_IOB_DMA_RXCH_STAT_CNTDEL_M      0x00000fff
19315 +#define    PAS_IOB_DMA_RXCH_STAT_CNTDEL_S      0
19316 +#define    PAS_IOB_DMA_RXCH_STAT_CNTDEL(x)     (((x) << PAS_IOB_DMA_RXCH_STAT_CNTDEL_S) &\
19317 +                                                PAS_IOB_DMA_RXCH_STAT_CNTDEL_M)
19318 +#define PAS_IOB_DMA_TXCH_STAT(i)       (0x1400 + (i)*4)
19319 +#define    PAS_IOB_DMA_TXCH_STAT_INTGEN        0x00001000
19320 +#define    PAS_IOB_DMA_TXCH_STAT_CNTDEL_M      0x00000fff
19321 +#define    PAS_IOB_DMA_TXCH_STAT_CNTDEL_S      0
19322 +#define    PAS_IOB_DMA_TXCH_STAT_CNTDEL(x)     (((x) << PAS_IOB_DMA_TXCH_STAT_CNTDEL_S) &\
19323 +                                                PAS_IOB_DMA_TXCH_STAT_CNTDEL_M)
19324 +#define PAS_IOB_DMA_RXCH_RESET(i)      (0x1500 + (i)*4)
19325 +#define    PAS_IOB_DMA_RXCH_RESET_PCNT_M       0xffff0000
19326 +#define    PAS_IOB_DMA_RXCH_RESET_PCNT_S       16
19327 +#define    PAS_IOB_DMA_RXCH_RESET_PCNT(x)      (((x) << PAS_IOB_DMA_RXCH_RESET_PCNT_S) & \
19328 +                                                PAS_IOB_DMA_RXCH_RESET_PCNT_M)
19329 +#define    PAS_IOB_DMA_RXCH_RESET_PCNTRST      0x00000020
19330 +#define    PAS_IOB_DMA_RXCH_RESET_DCNTRST      0x00000010
19331 +#define    PAS_IOB_DMA_RXCH_RESET_TINTC                0x00000008
19332 +#define    PAS_IOB_DMA_RXCH_RESET_DINTC                0x00000004
19333 +#define    PAS_IOB_DMA_RXCH_RESET_SINTC                0x00000002
19334 +#define    PAS_IOB_DMA_RXCH_RESET_PINTC                0x00000001
19335 +#define PAS_IOB_DMA_TXCH_RESET(i)      (0x1600 + (i)*4)
19336 +#define    PAS_IOB_DMA_TXCH_RESET_PCNT_M       0xffff0000
19337 +#define    PAS_IOB_DMA_TXCH_RESET_PCNT_S       16
19338 +#define    PAS_IOB_DMA_TXCH_RESET_PCNT(x)      (((x) << PAS_IOB_DMA_TXCH_RESET_PCNT_S) & \
19339 +                                                PAS_IOB_DMA_TXCH_RESET_PCNT_M)
19340 +#define    PAS_IOB_DMA_TXCH_RESET_PCNTRST      0x00000020
19341 +#define    PAS_IOB_DMA_TXCH_RESET_DCNTRST      0x00000010
19342 +#define    PAS_IOB_DMA_TXCH_RESET_TINTC                0x00000008
19343 +#define    PAS_IOB_DMA_TXCH_RESET_DINTC                0x00000004
19344 +#define    PAS_IOB_DMA_TXCH_RESET_SINTC                0x00000002
19345 +#define    PAS_IOB_DMA_TXCH_RESET_PINTC                0x00000001
19346 +
19347 +#define PAS_IOB_DMA_COM_TIMEOUTCFG             0x1700
19348 +#define    PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_M   0x00ffffff
19349 +#define    PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_S   0
19350 +#define    PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(x)  (((x) << PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_S) & \
19351 +                                                PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_M)
19352 +
19353 +/* Transmit descriptor fields */
19354 +#define        XCT_MACTX_T             0x8000000000000000ull
19355 +#define        XCT_MACTX_ST            0x4000000000000000ull
19356 +#define XCT_MACTX_NORES                0x0000000000000000ull
19357 +#define XCT_MACTX_8BRES                0x1000000000000000ull
19358 +#define XCT_MACTX_24BRES       0x2000000000000000ull
19359 +#define XCT_MACTX_40BRES       0x3000000000000000ull
19360 +#define XCT_MACTX_I            0x0800000000000000ull
19361 +#define XCT_MACTX_O            0x0400000000000000ull
19362 +#define XCT_MACTX_E            0x0200000000000000ull
19363 +#define XCT_MACTX_VLAN_M       0x0180000000000000ull
19364 +#define XCT_MACTX_VLAN_NOP     0x0000000000000000ull
19365 +#define XCT_MACTX_VLAN_REMOVE  0x0080000000000000ull
19366 +#define XCT_MACTX_VLAN_INSERT   0x0100000000000000ull
19367 +#define XCT_MACTX_VLAN_REPLACE  0x0180000000000000ull
19368 +#define XCT_MACTX_CRC_M                0x0060000000000000ull
19369 +#define XCT_MACTX_CRC_NOP      0x0000000000000000ull
19370 +#define XCT_MACTX_CRC_INSERT   0x0020000000000000ull
19371 +#define XCT_MACTX_CRC_PAD      0x0040000000000000ull
19372 +#define XCT_MACTX_CRC_REPLACE  0x0060000000000000ull
19373 +#define XCT_MACTX_SS           0x0010000000000000ull
19374 +#define XCT_MACTX_LLEN_M       0x00007fff00000000ull
19375 +#define XCT_MACTX_LLEN_S       32ull
19376 +#define XCT_MACTX_LLEN(x)      ((((long)(x)) << XCT_MACTX_LLEN_S) & \
19377 +                                XCT_MACTX_LLEN_M)
19378 +#define XCT_MACTX_IPH_M                0x00000000f8000000ull
19379 +#define XCT_MACTX_IPH_S                27ull
19380 +#define XCT_MACTX_IPH(x)       ((((long)(x)) << XCT_MACTX_IPH_S) & \
19381 +                                XCT_MACTX_IPH_M)
19382 +#define XCT_MACTX_IPO_M                0x0000000007c00000ull
19383 +#define XCT_MACTX_IPO_S                22ull
19384 +#define XCT_MACTX_IPO(x)       ((((long)(x)) << XCT_MACTX_IPO_S) & \
19385 +                                XCT_MACTX_IPO_M)
19386 +#define XCT_MACTX_CSUM_M       0x0000000000000060ull
19387 +#define XCT_MACTX_CSUM_NOP     0x0000000000000000ull
19388 +#define XCT_MACTX_CSUM_TCP     0x0000000000000040ull
19389 +#define XCT_MACTX_CSUM_UDP     0x0000000000000060ull
19390 +#define XCT_MACTX_V6           0x0000000000000010ull
19391 +#define XCT_MACTX_C            0x0000000000000004ull
19392 +#define XCT_MACTX_AL2          0x0000000000000002ull
19393 +
19394 +#define XCT_PTR_T              0x8000000000000000ull
19395 +#define XCT_PTR_LEN_M          0x7ffff00000000000ull
19396 +#define XCT_PTR_LEN_S          44
19397 +#define XCT_PTR_LEN(x)         ((((long)(x)) << XCT_PTR_LEN_S) & \
19398 +                                XCT_PTR_LEN_M)
19399 +#define XCT_PTR_ADDR_M         0x00000fffffffffffull
19400 +#define XCT_PTR_ADDR_S         0
19401 +#define XCT_PTR_ADDR(x)                ((((long)(x)) << XCT_PTR_ADDR_S) & \
19402 +                                XCT_PTR_ADDR_M)
19403 +
19404 +/* Function descriptor fields */
19405 +#define        XCT_FUN_T               0x8000000000000000ull
19406 +#define        XCT_FUN_ST              0x4000000000000000ull
19407 +#define XCT_FUN_NORES          0x0000000000000000ull
19408 +#define XCT_FUN_8BRES          0x1000000000000000ull
19409 +#define XCT_FUN_24BRES         0x2000000000000000ull
19410 +#define XCT_FUN_40BRES         0x3000000000000000ull
19411 +#define XCT_FUN_I              0x0800000000000000ull
19412 +#define XCT_FUN_O              0x0400000000000000ull
19413 +#define XCT_FUN_E              0x0200000000000000ull
19414 +#define XCT_FUN_FUN_S          54
19415 +#define XCT_FUN_FUN_M          0x01c0000000000000ull
19416 +#define XCT_FUN_FUN(num)       ((((long)(num)) << XCT_FUN_FUN_S) & \
19417 +                               XCT_FUN_FUN_M)
19418 +#define XCT_FUN_CRM_NOP                0x0000000000000000ull
19419 +#define XCT_FUN_CRM_SIG                0x0008000000000000ull
19420 +#define XCT_FUN_CRM_ENC                0x0010000000000000ull
19421 +#define XCT_FUN_CRM_DEC                0x0018000000000000ull
19422 +#define XCT_FUN_CRM_SIG_ENC    0x0020000000000000ull
19423 +#define XCT_FUN_CRM_ENC_SIG    0x0028000000000000ull
19424 +#define XCT_FUN_CRM_SIG_DEC    0x0030000000000000ull
19425 +#define XCT_FUN_CRM_DEC_SIG    0x0038000000000000ull
19426 +#define XCT_FUN_LLEN_M         0x0007ffff00000000ull
19427 +#define XCT_FUN_LLEN_S         32ULL
19428 +#define XCT_FUN_LLEN(x)                ((((long)(x)) << XCT_FUN_LLEN_S) & \
19429 +                                XCT_FUN_LLEN_M)
19430 +#define XCT_FUN_SHL_M          0x00000000f8000000ull
19431 +#define XCT_FUN_SHL_S          27ull
19432 +#define XCT_FUN_SHL(x)         ((((long)(x)) << XCT_FUN_SHL_S) & \
19433 +                                XCT_FUN_SHL_M)
19434 +#define XCT_FUN_CHL_M          0x0000000007c00000ull
19435 +#define XCT_FUN_CHL_S          22ull
19436 +#define XCT_FUN_CHL(x)         ((((long)(x)) << XCT_FUN_CHL_S) & \
19437 +                                XCT_FUN_CHL_M)
19438 +#define XCT_FUN_HSZ_M          0x00000000003c0000ull
19439 +#define XCT_FUN_HSZ_S          18ull
19440 +#define XCT_FUN_HSZ(x)         ((((long)(x)) << XCT_FUN_HSZ_S) & \
19441 +                                XCT_FUN_HSZ_M)
19442 +#define XCT_FUN_ALG_DES                0x0000000000000000ull
19443 +#define XCT_FUN_ALG_3DES       0x0000000000008000ull
19444 +#define XCT_FUN_ALG_AES                0x0000000000010000ull
19445 +#define XCT_FUN_ALG_ARC                0x0000000000018000ull
19446 +#define XCT_FUN_ALG_KASUMI     0x0000000000020000ull
19447 +#define XCT_FUN_BCM_ECB                0x0000000000000000ull
19448 +#define XCT_FUN_BCM_CBC                0x0000000000001000ull
19449 +#define XCT_FUN_BCM_CFB                0x0000000000002000ull
19450 +#define XCT_FUN_BCM_OFB                0x0000000000003000ull
19451 +#define XCT_FUN_BCM_CNT                0x0000000000003800ull
19452 +#define XCT_FUN_BCM_KAS_F8     0x0000000000002800ull
19453 +#define XCT_FUN_BCM_KAS_F9     0x0000000000001800ull
19454 +#define XCT_FUN_BCP_NO_PAD     0x0000000000000000ull
19455 +#define XCT_FUN_BCP_ZRO                0x0000000000000200ull
19456 +#define XCT_FUN_BCP_PL         0x0000000000000400ull
19457 +#define XCT_FUN_BCP_INCR       0x0000000000000600ull
19458 +#define XCT_FUN_SIG_MD5                (0ull << 4)
19459 +#define XCT_FUN_SIG_SHA1       (2ull << 4)
19460 +#define XCT_FUN_SIG_HMAC_MD5   (8ull << 4)
19461 +#define XCT_FUN_SIG_HMAC_SHA1  (10ull << 4)
19462 +#define XCT_FUN_A              0x0000000000000008ull
19463 +#define XCT_FUN_C              0x0000000000000004ull
19464 +#define XCT_FUN_AL2            0x0000000000000002ull
19465 +#define XCT_FUN_SE             0x0000000000000001ull
19466 +
19467 +#define XCT_FUN_SRC_PTR(len, addr)     (XCT_PTR_LEN(len) | XCT_PTR_ADDR(addr))
19468 +#define XCT_FUN_DST_PTR(len, addr)     (XCT_FUN_SRC_PTR(len, addr) | \
19469 +                                       0x8000000000000000ull)
19470 +
19471 +#define XCT_CTRL_HDR_FUN_NUM_M         0x01c0000000000000ull
19472 +#define XCT_CTRL_HDR_FUN_NUM_S         54
19473 +#define XCT_CTRL_HDR_LEN_M             0x0007ffff00000000ull
19474 +#define XCT_CTRL_HDR_LEN_S             32
19475 +#define XCT_CTRL_HDR_REG_M             0x00000000000000ffull
19476 +#define XCT_CTRL_HDR_REG_S             0
19477 +
19478 +#define XCT_CTRL_HDR(funcN,len,reg)    (0x9400000000000000ull | \
19479 +                       ((((long)(funcN)) << XCT_CTRL_HDR_FUN_NUM_S) \
19480 +                       & XCT_CTRL_HDR_FUN_NUM_M) | \
19481 +                       ((((long)(len)) << \
19482 +                       XCT_CTRL_HDR_LEN_S) & XCT_CTRL_HDR_LEN_M) | \
19483 +                       ((((long)(reg)) << \
19484 +                       XCT_CTRL_HDR_REG_S) & XCT_CTRL_HDR_REG_M))
19485 +
19486 +/* Function config command options */
19487 +#define        DMA_CALGO_DES                   0x00
19488 +#define        DMA_CALGO_3DES                  0x01
19489 +#define        DMA_CALGO_AES                   0x02
19490 +#define        DMA_CALGO_ARC                   0x03
19491 +
19492 +#define DMA_FN_CIV0                    0x02
19493 +#define DMA_FN_CIV1                    0x03
19494 +#define DMA_FN_HKEY0                   0x0a
19495 +
19496 +#define XCT_PTR_ADDR_LEN(ptr)          ((ptr) & XCT_PTR_ADDR_M), \
19497 +                       (((ptr) & XCT_PTR_LEN_M) >> XCT_PTR_LEN_S)
19498 +
19499 +#endif /* PASEMI_FNU_H */
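
The XCT_FUN_* and XCT_PTR_* values above each describe one 64-bit descriptor word for the PA Semi security/DMA engine: the function-descriptor header selects the crypto function number, cipher algorithm, chaining mode and signature algorithm, while the pointer words pack a 19-bit length together with a 44-bit bus address. As a minimal sketch of how a driver could compose such words by OR-ing the field macros together — illustrative only, assuming this header is included; the helper names are hypothetical and are not taken from the OCF pasemi driver:

	#include <linux/types.h>	/* u64, dma_addr_t */

	/* Hypothetical example: AES-CBC encrypt followed by HMAC-SHA1,
	 * with an interrupt requested on completion. All fields occupy
	 * disjoint bit ranges, so a plain OR composes the header word. */
	static inline u64 example_fun_hdr(unsigned int func, unsigned int payload_len)
	{
		return XCT_FUN_O | XCT_FUN_I |
		       XCT_FUN_FUN(func) |
		       XCT_FUN_CRM_ENC_SIG |
		       XCT_FUN_LLEN(payload_len) |
		       XCT_FUN_ALG_AES | XCT_FUN_BCM_CBC |
		       XCT_FUN_SIG_HMAC_SHA1;
	}

	/* Hypothetical example: a source pointer word carrying a buffer's
	 * length (bits 44..62) and bus address (bits 0..43). */
	static inline u64 example_src_ptr(dma_addr_t addr, unsigned int len)
	{
		return XCT_FUN_SRC_PTR(len, addr);
	}
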