Diffstat (limited to 'target/linux/generic-2.4/files/crypto/ocf/pasemi')
3 files changed, 1431 insertions, 0 deletions
diff --git a/target/linux/generic-2.4/files/crypto/ocf/pasemi/Makefile b/target/linux/generic-2.4/files/crypto/ocf/pasemi/Makefile new file mode 100644 index 0000000000..b0a3980f21 --- /dev/null +++ b/target/linux/generic-2.4/files/crypto/ocf/pasemi/Makefile @@ -0,0 +1,12 @@ +# for SGlinux builds +-include $(ROOTDIR)/modules/.config + +obj-$(CONFIG_OCF_PASEMI) += pasemi.o + +obj ?= . +EXTRA_CFLAGS += -I$(obj)/.. -I$(obj)/ + +ifdef TOPDIR +-include $(TOPDIR)/Rules.make +endif + diff --git a/target/linux/generic-2.4/files/crypto/ocf/pasemi/pasemi.c b/target/linux/generic-2.4/files/crypto/ocf/pasemi/pasemi.c new file mode 100644 index 0000000000..c3bb9313d3 --- /dev/null +++ b/target/linux/generic-2.4/files/crypto/ocf/pasemi/pasemi.c @@ -0,0 +1,1009 @@ +/* + * Copyright (C) 2007 PA Semi, Inc + * + * Driver for the PA Semi PWRficient DMA Crypto Engine + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef AUTOCONF_INCLUDED +#include <linux/config.h> +#endif +#include <linux/module.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/timer.h> +#include <linux/random.h> +#include <linux/skbuff.h> +#include <asm/scatterlist.h> +#include <linux/moduleparam.h> +#include <linux/pci.h> +#include <cryptodev.h> +#include <uio.h> +#include "pasemi_fnu.h" + +#define DRV_NAME "pasemi" + +#define TIMER_INTERVAL 1000 + +static void __devexit pasemi_dma_remove(struct pci_dev *pdev); +static struct pasdma_status volatile * dma_status; + +static int debug; +module_param(debug, int, 0644); +MODULE_PARM_DESC(debug, "Enable debug"); + +static void pasemi_desc_start(struct pasemi_desc *desc, u64 hdr) +{ + desc->postop = 0; + desc->quad[0] = hdr; + desc->quad_cnt = 1; + desc->size = 1; +} + +static void pasemi_desc_build(struct pasemi_desc *desc, u64 val) +{ + desc->quad[desc->quad_cnt++] = val; + desc->size = (desc->quad_cnt + 1) / 2; +} + +static void pasemi_desc_hdr(struct pasemi_desc *desc, u64 hdr) +{ + desc->quad[0] |= hdr; +} + +static int pasemi_desc_size(struct pasemi_desc *desc) +{ + return desc->size; +} + +static void pasemi_ring_add_desc( + struct pasemi_fnu_txring *ring, + struct pasemi_desc *desc, + struct cryptop *crp) { + int i; + int ring_index = 2 * (ring->next_to_fill & (TX_RING_SIZE-1)); + + TX_DESC_INFO(ring, ring->next_to_fill).desc_size = desc->size; + TX_DESC_INFO(ring, ring->next_to_fill).desc_postop = desc->postop; + TX_DESC_INFO(ring, ring->next_to_fill).cf_crp = crp; + + for (i = 0; i < desc->quad_cnt; i += 2) { + ring_index = 2 * (ring->next_to_fill & (TX_RING_SIZE-1)); + ring->desc[ring_index] = desc->quad[i]; + ring->desc[ring_index + 1] = desc->quad[i + 1]; + ring->next_to_fill++; + } + + if (desc->quad_cnt & 1) + ring->desc[ring_index + 1] = 0; +} + +static void pasemi_ring_incr(struct pasemi_softc *sc, int chan_index, int incr) +{ + out_le32(sc->dma_regs + PAS_DMA_TXCHAN_INCR(sc->base_chan + chan_index), + incr); +} + +/* + * Generate a new software 
session. + */ +static int +pasemi_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri) +{ + struct cryptoini *c, *encini = NULL, *macini = NULL; + struct pasemi_softc *sc = device_get_softc(dev); + struct pasemi_session *ses = NULL, **sespp; + int sesn, blksz = 0; + u64 ccmd = 0; + unsigned long flags; + struct pasemi_desc init_desc; + struct pasemi_fnu_txring *txring; + + DPRINTF("%s()\n", __FUNCTION__); + if (sidp == NULL || cri == NULL || sc == NULL) { + DPRINTF("%s,%d - EINVAL\n", __FILE__, __LINE__); + return -EINVAL; + } + for (c = cri; c != NULL; c = c->cri_next) { + if (ALG_IS_SIG(c->cri_alg)) { + if (macini) + return -EINVAL; + macini = c; + } else if (ALG_IS_CIPHER(c->cri_alg)) { + if (encini) + return -EINVAL; + encini = c; + } else { + DPRINTF("UNKNOWN c->cri_alg %d\n", c->cri_alg); + return -EINVAL; + } + } + if (encini == NULL && macini == NULL) + return -EINVAL; + if (encini) { + /* validate key length */ + switch (encini->cri_alg) { + case CRYPTO_DES_CBC: + if (encini->cri_klen != 64) + return -EINVAL; + ccmd = DMA_CALGO_DES; + break; + case CRYPTO_3DES_CBC: + if (encini->cri_klen != 192) + return -EINVAL; + ccmd = DMA_CALGO_3DES; + break; + case CRYPTO_AES_CBC: + if (encini->cri_klen != 128 && + encini->cri_klen != 192 && + encini->cri_klen != 256) + return -EINVAL; + ccmd = DMA_CALGO_AES; + break; + case CRYPTO_ARC4: + if (encini->cri_klen != 128) + return -EINVAL; + ccmd = DMA_CALGO_ARC; + break; + default: + DPRINTF("UNKNOWN encini->cri_alg %d\n", + encini->cri_alg); + return -EINVAL; + } + } + + if (macini) { + switch (macini->cri_alg) { + case CRYPTO_MD5: + case CRYPTO_MD5_HMAC: + blksz = 16; + break; + case CRYPTO_SHA1: + case CRYPTO_SHA1_HMAC: + blksz = 20; + break; + default: + DPRINTF("UNKNOWN macini->cri_alg %d\n", + macini->cri_alg); + return -EINVAL; + } + if (((macini->cri_klen + 7) / 8) > blksz) { + DPRINTF("key length %d bigger than blksize %d not supported\n", + ((macini->cri_klen + 7) / 8), blksz); + return -EINVAL; + } + } + + for (sesn = 0; sesn < sc->sc_nsessions; sesn++) { + if (sc->sc_sessions[sesn] == NULL) { + sc->sc_sessions[sesn] = (struct pasemi_session *) + kzalloc(sizeof(struct pasemi_session), GFP_ATOMIC); + ses = sc->sc_sessions[sesn]; + break; + } else if (sc->sc_sessions[sesn]->used == 0) { + ses = sc->sc_sessions[sesn]; + break; + } + } + + if (ses == NULL) { + sespp = (struct pasemi_session **) + kzalloc(sc->sc_nsessions * 2 * + sizeof(struct pasemi_session *), GFP_ATOMIC); + if (sespp == NULL) + return -ENOMEM; + memcpy(sespp, sc->sc_sessions, + sc->sc_nsessions * sizeof(struct pasemi_session *)); + kfree(sc->sc_sessions); + sc->sc_sessions = sespp; + sesn = sc->sc_nsessions; + ses = sc->sc_sessions[sesn] = (struct pasemi_session *) + kzalloc(sizeof(struct pasemi_session), GFP_ATOMIC); + if (ses == NULL) + return -ENOMEM; + sc->sc_nsessions *= 2; + } + + ses->used = 1; + + ses->dma_addr = pci_map_single(sc->dma_pdev, (void *) ses->civ, + sizeof(struct pasemi_session), DMA_TO_DEVICE); + + /* enter the channel scheduler */ + spin_lock_irqsave(&sc->sc_chnlock, flags); + + /* ARC4 has to be processed by the even channel */ + if (encini && (encini->cri_alg == CRYPTO_ARC4)) + ses->chan = sc->sc_lastchn & ~1; + else + ses->chan = sc->sc_lastchn; + sc->sc_lastchn = (sc->sc_lastchn + 1) % sc->sc_num_channels; + + spin_unlock_irqrestore(&sc->sc_chnlock, flags); + + txring = &sc->tx[ses->chan]; + + if (encini) { + ses->ccmd = ccmd; + + /* get an IV */ + /* XXX may read fewer than requested */ + get_random_bytes(ses->civ, 
sizeof(ses->civ)); + + ses->keysz = (encini->cri_klen - 63) / 64; + memcpy(ses->key, encini->cri_key, (ses->keysz + 1) * 8); + + pasemi_desc_start(&init_desc, + XCT_CTRL_HDR(ses->chan, (encini && macini) ? 0x68 : 0x40, DMA_FN_CIV0)); + pasemi_desc_build(&init_desc, + XCT_FUN_SRC_PTR((encini && macini) ? 0x68 : 0x40, ses->dma_addr)); + } + if (macini) { + if (macini->cri_alg == CRYPTO_MD5_HMAC || + macini->cri_alg == CRYPTO_SHA1_HMAC) + memcpy(ses->hkey, macini->cri_key, blksz); + else { + /* Load initialization constants(RFC 1321, 3174) */ + ses->hiv[0] = 0x67452301efcdab89ULL; + ses->hiv[1] = 0x98badcfe10325476ULL; + ses->hiv[2] = 0xc3d2e1f000000000ULL; + } + ses->hseq = 0ULL; + } + + spin_lock_irqsave(&txring->fill_lock, flags); + + if (((txring->next_to_fill + pasemi_desc_size(&init_desc)) - + txring->next_to_clean) > TX_RING_SIZE) { + spin_unlock_irqrestore(&txring->fill_lock, flags); + return ERESTART; + } + + if (encini) { + pasemi_ring_add_desc(txring, &init_desc, NULL); + pasemi_ring_incr(sc, ses->chan, + pasemi_desc_size(&init_desc)); + } + + txring->sesn = sesn; + spin_unlock_irqrestore(&txring->fill_lock, flags); + + *sidp = PASEMI_SID(sesn); + return 0; +} + +/* + * Deallocate a session. + */ +static int +pasemi_freesession(device_t dev, u_int64_t tid) +{ + struct pasemi_softc *sc = device_get_softc(dev); + int session; + u_int32_t sid = ((u_int32_t) tid) & 0xffffffff; + + DPRINTF("%s()\n", __FUNCTION__); + + if (sc == NULL) + return -EINVAL; + session = PASEMI_SESSION(sid); + if (session >= sc->sc_nsessions || !sc->sc_sessions[session]) + return -EINVAL; + + pci_unmap_single(sc->dma_pdev, + sc->sc_sessions[session]->dma_addr, + sizeof(struct pasemi_session), DMA_TO_DEVICE); + memset(sc->sc_sessions[session], 0, + sizeof(struct pasemi_session)); + + return 0; +} + +static int +pasemi_process(device_t dev, struct cryptop *crp, int hint) +{ + + int err = 0, ivsize, srclen = 0, reinit = 0, reinit_size = 0, chsel; + struct pasemi_softc *sc = device_get_softc(dev); + struct cryptodesc *crd1, *crd2, *maccrd, *enccrd; + caddr_t ivp; + struct pasemi_desc init_desc, work_desc; + struct pasemi_session *ses; + struct sk_buff *skb; + struct uio *uiop; + unsigned long flags; + struct pasemi_fnu_txring *txring; + + DPRINTF("%s()\n", __FUNCTION__); + + if (crp == NULL || crp->crp_callback == NULL || sc == NULL) + return -EINVAL; + + crp->crp_etype = 0; + if (PASEMI_SESSION(crp->crp_sid) >= sc->sc_nsessions) + return -EINVAL; + + ses = sc->sc_sessions[PASEMI_SESSION(crp->crp_sid)]; + + crd1 = crp->crp_desc; + if (crd1 == NULL) { + err = -EINVAL; + goto errout; + } + crd2 = crd1->crd_next; + + if (ALG_IS_SIG(crd1->crd_alg)) { + maccrd = crd1; + if (crd2 == NULL) + enccrd = NULL; + else if (ALG_IS_CIPHER(crd2->crd_alg) && + (crd2->crd_flags & CRD_F_ENCRYPT) == 0) + enccrd = crd2; + else + goto erralg; + } else if (ALG_IS_CIPHER(crd1->crd_alg)) { + enccrd = crd1; + if (crd2 == NULL) + maccrd = NULL; + else if (ALG_IS_SIG(crd2->crd_alg) && + (crd1->crd_flags & CRD_F_ENCRYPT)) + maccrd = crd2; + else + goto erralg; + } else + goto erralg; + + chsel = ses->chan; + + txring = &sc->tx[chsel]; + + if (enccrd && !maccrd) { + if (enccrd->crd_alg == CRYPTO_ARC4) + reinit = 1; + reinit_size = 0x40; + srclen = crp->crp_ilen; + + pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I + | XCT_FUN_FUN(chsel)); + if (enccrd->crd_flags & CRD_F_ENCRYPT) + pasemi_desc_hdr(&work_desc, XCT_FUN_CRM_ENC); + else + pasemi_desc_hdr(&work_desc, XCT_FUN_CRM_DEC); + } else if (enccrd && maccrd) { + if (enccrd->crd_alg == 
CRYPTO_ARC4) + reinit = 1; + reinit_size = 0x68; + + if (enccrd->crd_flags & CRD_F_ENCRYPT) { + /* Encrypt -> Authenticate */ + pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I | XCT_FUN_CRM_ENC_SIG + | XCT_FUN_A | XCT_FUN_FUN(chsel)); + srclen = maccrd->crd_skip + maccrd->crd_len; + } else { + /* Authenticate -> Decrypt */ + pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I | XCT_FUN_CRM_SIG_DEC + | XCT_FUN_24BRES | XCT_FUN_FUN(chsel)); + pasemi_desc_build(&work_desc, 0); + pasemi_desc_build(&work_desc, 0); + pasemi_desc_build(&work_desc, 0); + work_desc.postop = PASEMI_CHECK_SIG; + srclen = crp->crp_ilen; + } + + pasemi_desc_hdr(&work_desc, XCT_FUN_SHL(maccrd->crd_skip / 4)); + pasemi_desc_hdr(&work_desc, XCT_FUN_CHL(enccrd->crd_skip - maccrd->crd_skip)); + } else if (!enccrd && maccrd) { + srclen = maccrd->crd_len; + + pasemi_desc_start(&init_desc, + XCT_CTRL_HDR(chsel, 0x58, DMA_FN_HKEY0)); + pasemi_desc_build(&init_desc, + XCT_FUN_SRC_PTR(0x58, ((struct pasemi_session *)ses->dma_addr)->hkey)); + + pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I | XCT_FUN_CRM_SIG + | XCT_FUN_A | XCT_FUN_FUN(chsel)); + } + + if (enccrd) { + switch (enccrd->crd_alg) { + case CRYPTO_3DES_CBC: + pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_3DES | + XCT_FUN_BCM_CBC); + ivsize = sizeof(u64); + break; + case CRYPTO_DES_CBC: + pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_DES | + XCT_FUN_BCM_CBC); + ivsize = sizeof(u64); + break; + case CRYPTO_AES_CBC: + pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_AES | + XCT_FUN_BCM_CBC); + ivsize = 2 * sizeof(u64); + break; + case CRYPTO_ARC4: + pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_ARC); + ivsize = 0; + break; + default: + printk(DRV_NAME ": unimplemented enccrd->crd_alg %d\n", + enccrd->crd_alg); + err = -EINVAL; + goto errout; + } + + ivp = (ivsize == sizeof(u64)) ? 
(caddr_t) &ses->civ[1] : (caddr_t) &ses->civ[0]; + if (enccrd->crd_flags & CRD_F_ENCRYPT) { + if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) + memcpy(ivp, enccrd->crd_iv, ivsize); + /* If IV is not present in the buffer already, it has to be copied there */ + if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) + crypto_copyback(crp->crp_flags, crp->crp_buf, + enccrd->crd_inject, ivsize, ivp); + } else { + if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) + /* IV is provided expicitly in descriptor */ + memcpy(ivp, enccrd->crd_iv, ivsize); + else + /* IV is provided in the packet */ + crypto_copydata(crp->crp_flags, crp->crp_buf, + enccrd->crd_inject, ivsize, + ivp); + } + } + + if (maccrd) { + switch (maccrd->crd_alg) { + case CRYPTO_MD5: + pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_MD5 | + XCT_FUN_HSZ((crp->crp_ilen - maccrd->crd_inject) / 4)); + break; + case CRYPTO_SHA1: + pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_SHA1 | + XCT_FUN_HSZ((crp->crp_ilen - maccrd->crd_inject) / 4)); + break; + case CRYPTO_MD5_HMAC: + pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_HMAC_MD5 | + XCT_FUN_HSZ((crp->crp_ilen - maccrd->crd_inject) / 4)); + break; + case CRYPTO_SHA1_HMAC: + pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_HMAC_SHA1 | + XCT_FUN_HSZ((crp->crp_ilen - maccrd->crd_inject) / 4)); + break; + default: + printk(DRV_NAME ": unimplemented maccrd->crd_alg %d\n", + maccrd->crd_alg); + err = -EINVAL; + goto errout; + } + } + + if (crp->crp_flags & CRYPTO_F_SKBUF) { + /* using SKB buffers */ + skb = (struct sk_buff *)crp->crp_buf; + if (skb_shinfo(skb)->nr_frags) { + printk(DRV_NAME ": skb frags unimplemented\n"); + err = -EINVAL; + goto errout; + } + pasemi_desc_build( + &work_desc, + XCT_FUN_DST_PTR(skb->len, pci_map_single( + sc->dma_pdev, skb->data, + skb->len, DMA_TO_DEVICE))); + pasemi_desc_build( + &work_desc, + XCT_FUN_SRC_PTR( + srclen, pci_map_single( + sc->dma_pdev, skb->data, + srclen, DMA_TO_DEVICE))); + pasemi_desc_hdr(&work_desc, XCT_FUN_LLEN(srclen)); + } else if (crp->crp_flags & CRYPTO_F_IOV) { + /* using IOV buffers */ + uiop = (struct uio *)crp->crp_buf; + if (uiop->uio_iovcnt > 1) { + printk(DRV_NAME ": iov frags unimplemented\n"); + err = -EINVAL; + goto errout; + } + + /* crp_olen is never set; always use crp_ilen */ + pasemi_desc_build( + &work_desc, + XCT_FUN_DST_PTR(crp->crp_ilen, pci_map_single( + sc->dma_pdev, + uiop->uio_iov->iov_base, + crp->crp_ilen, DMA_TO_DEVICE))); + pasemi_desc_hdr(&work_desc, XCT_FUN_LLEN(srclen)); + + pasemi_desc_build( + &work_desc, + XCT_FUN_SRC_PTR(srclen, pci_map_single( + sc->dma_pdev, + uiop->uio_iov->iov_base, + srclen, DMA_TO_DEVICE))); + } else { + /* using contig buffers */ + pasemi_desc_build( + &work_desc, + XCT_FUN_DST_PTR(crp->crp_ilen, pci_map_single( + sc->dma_pdev, + crp->crp_buf, + crp->crp_ilen, DMA_TO_DEVICE))); + pasemi_desc_build( + &work_desc, + XCT_FUN_SRC_PTR(srclen, pci_map_single( + sc->dma_pdev, + crp->crp_buf, srclen, + DMA_TO_DEVICE))); + pasemi_desc_hdr(&work_desc, XCT_FUN_LLEN(srclen)); + } + + spin_lock_irqsave(&txring->fill_lock, flags); + + if (txring->sesn != PASEMI_SESSION(crp->crp_sid)) { + txring->sesn = PASEMI_SESSION(crp->crp_sid); + reinit = 1; + } + + if (enccrd) { + pasemi_desc_start(&init_desc, + XCT_CTRL_HDR(chsel, reinit ? reinit_size : 0x10, DMA_FN_CIV0)); + pasemi_desc_build(&init_desc, + XCT_FUN_SRC_PTR(reinit ? 
reinit_size : 0x10, ses->dma_addr)); + } + + if (((txring->next_to_fill + pasemi_desc_size(&init_desc) + + pasemi_desc_size(&work_desc)) - + txring->next_to_clean) > TX_RING_SIZE) { + spin_unlock_irqrestore(&txring->fill_lock, flags); + err = ERESTART; + goto errout; + } + + pasemi_ring_add_desc(txring, &init_desc, NULL); + pasemi_ring_add_desc(txring, &work_desc, crp); + + pasemi_ring_incr(sc, chsel, + pasemi_desc_size(&init_desc) + + pasemi_desc_size(&work_desc)); + + spin_unlock_irqrestore(&txring->fill_lock, flags); + + mod_timer(&txring->crypto_timer, jiffies + TIMER_INTERVAL); + + return 0; + +erralg: + printk(DRV_NAME ": unsupported algorithm or algorithm order alg1 %d alg2 %d\n", + crd1->crd_alg, crd2->crd_alg); + err = -EINVAL; + +errout: + if (err != ERESTART) { + crp->crp_etype = err; + crypto_done(crp); + } + return err; +} + +static int pasemi_clean_tx(struct pasemi_softc *sc, int chan) +{ + int i, j, ring_idx; + struct pasemi_fnu_txring *ring = &sc->tx[chan]; + u16 delta_cnt; + int flags, loops = 10; + int desc_size; + struct cryptop *crp; + + spin_lock_irqsave(&ring->clean_lock, flags); + + while ((delta_cnt = (dma_status->tx_sta[sc->base_chan + chan] + & PAS_STATUS_PCNT_M) - ring->total_pktcnt) + && loops--) { + + for (i = 0; i < delta_cnt; i++) { + desc_size = TX_DESC_INFO(ring, ring->next_to_clean).desc_size; + crp = TX_DESC_INFO(ring, ring->next_to_clean).cf_crp; + if (crp) { + ring_idx = 2 * (ring->next_to_clean & (TX_RING_SIZE-1)); + if (TX_DESC_INFO(ring, ring->next_to_clean).desc_postop & PASEMI_CHECK_SIG) { + /* Need to make sure signature matched, + * if not - return error */ + if (!(ring->desc[ring_idx + 1] & (1ULL << 63))) + crp->crp_etype = -EINVAL; + } + crypto_done(TX_DESC_INFO(ring, + ring->next_to_clean).cf_crp); + TX_DESC_INFO(ring, ring->next_to_clean).cf_crp = NULL; + pci_unmap_single( + sc->dma_pdev, + XCT_PTR_ADDR_LEN(ring->desc[ring_idx + 1]), + PCI_DMA_TODEVICE); + + ring->desc[ring_idx] = ring->desc[ring_idx + 1] = 0; + + ring->next_to_clean++; + for (j = 1; j < desc_size; j++) { + ring_idx = 2 * + (ring->next_to_clean & + (TX_RING_SIZE-1)); + pci_unmap_single( + sc->dma_pdev, + XCT_PTR_ADDR_LEN(ring->desc[ring_idx]), + PCI_DMA_TODEVICE); + if (ring->desc[ring_idx + 1]) + pci_unmap_single( + sc->dma_pdev, + XCT_PTR_ADDR_LEN( + ring->desc[ + ring_idx + 1]), + PCI_DMA_TODEVICE); + ring->desc[ring_idx] = + ring->desc[ring_idx + 1] = 0; + ring->next_to_clean++; + } + } else { + for (j = 0; j < desc_size; j++) { + ring_idx = 2 * (ring->next_to_clean & (TX_RING_SIZE-1)); + ring->desc[ring_idx] = + ring->desc[ring_idx + 1] = 0; + ring->next_to_clean++; + } + } + } + + ring->total_pktcnt += delta_cnt; + } + spin_unlock_irqrestore(&ring->clean_lock, flags); + + return 0; +} + +static void sweepup_tx(struct pasemi_softc *sc) +{ + int i; + + for (i = 0; i < sc->sc_num_channels; i++) + pasemi_clean_tx(sc, i); +} + +static irqreturn_t pasemi_intr(int irq, void *arg, struct pt_regs *regs) +{ + struct pasemi_softc *sc = arg; + unsigned int reg; + int chan = irq - sc->base_irq; + int chan_index = sc->base_chan + chan; + u64 stat = dma_status->tx_sta[chan_index]; + + DPRINTF("%s()\n", __FUNCTION__); + + if (!(stat & PAS_STATUS_CAUSE_M)) + return IRQ_NONE; + + pasemi_clean_tx(sc, chan); + + stat = dma_status->tx_sta[chan_index]; + + reg = PAS_IOB_DMA_TXCH_RESET_PINTC | + PAS_IOB_DMA_TXCH_RESET_PCNT(sc->tx[chan].total_pktcnt); + + if (stat & PAS_STATUS_SOFT) + reg |= PAS_IOB_DMA_RXCH_RESET_SINTC; + + out_le32(sc->iob_regs + PAS_IOB_DMA_TXCH_RESET(chan_index), reg); + + 
+ return IRQ_HANDLED; +} + +static int pasemi_dma_setup_tx_resources(struct pasemi_softc *sc, int chan) +{ + u32 val; + int chan_index = chan + sc->base_chan; + int ret; + struct pasemi_fnu_txring *ring; + + ring = &sc->tx[chan]; + + spin_lock_init(&ring->fill_lock); + spin_lock_init(&ring->clean_lock); + + ring->desc_info = kzalloc(sizeof(struct pasemi_desc_info) * + TX_RING_SIZE, GFP_KERNEL); + if (!ring->desc_info) + return -ENOMEM; + + /* Allocate descriptors */ + ring->desc = dma_alloc_coherent(&sc->dma_pdev->dev, + TX_RING_SIZE * + 2 * sizeof(u64), + &ring->dma, GFP_KERNEL); + if (!ring->desc) + return -ENOMEM; + + memset((void *) ring->desc, 0, TX_RING_SIZE * 2 * sizeof(u64)); + + out_le32(sc->iob_regs + PAS_IOB_DMA_TXCH_RESET(chan_index), 0x30); + + ring->total_pktcnt = 0; + + out_le32(sc->dma_regs + PAS_DMA_TXCHAN_BASEL(chan_index), + PAS_DMA_TXCHAN_BASEL_BRBL(ring->dma)); + + val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->dma >> 32); + val |= PAS_DMA_TXCHAN_BASEU_SIZ(TX_RING_SIZE >> 2); + + out_le32(sc->dma_regs + PAS_DMA_TXCHAN_BASEU(chan_index), val); + + out_le32(sc->dma_regs + PAS_DMA_TXCHAN_CFG(chan_index), + PAS_DMA_TXCHAN_CFG_TY_FUNC | + PAS_DMA_TXCHAN_CFG_TATTR(chan) | + PAS_DMA_TXCHAN_CFG_WT(2)); + + /* enable tx channel */ + out_le32(sc->dma_regs + + PAS_DMA_TXCHAN_TCMDSTA(chan_index), + PAS_DMA_TXCHAN_TCMDSTA_EN); + + out_le32(sc->iob_regs + PAS_IOB_DMA_TXCH_CFG(chan_index), + PAS_IOB_DMA_TXCH_CFG_CNTTH(1000)); + + ring->next_to_fill = 0; + ring->next_to_clean = 0; + + snprintf(ring->irq_name, sizeof(ring->irq_name), + "%s%d", "crypto", chan); + + ring->irq = irq_create_mapping(NULL, sc->base_irq + chan); + ret = request_irq(ring->irq, (irq_handler_t) + pasemi_intr, IRQF_DISABLED, ring->irq_name, sc); + if (ret) { + printk(KERN_ERR DRV_NAME ": failed to hook irq %d ret %d\n", + ring->irq, ret); + ring->irq = -1; + return ret; + } + + setup_timer(&ring->crypto_timer, (void *) sweepup_tx, (unsigned long) sc); + + return 0; +} + +static device_method_t pasemi_methods = { + /* crypto device methods */ + DEVMETHOD(cryptodev_newsession, pasemi_newsession), + DEVMETHOD(cryptodev_freesession, pasemi_freesession), + DEVMETHOD(cryptodev_process, pasemi_process), +}; + +/* Set up the crypto device structure, private data, + * and anything else we need before we start */ + +static int __devinit +pasemi_dma_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct pasemi_softc *sc; + int ret, i; + + DPRINTF(KERN_ERR "%s()\n", __FUNCTION__); + + sc = kzalloc(sizeof(*sc), GFP_KERNEL); + if (!sc) + return -ENOMEM; + + softc_device_init(sc, DRV_NAME, 1, pasemi_methods); + + pci_set_drvdata(pdev, sc); + + spin_lock_init(&sc->sc_chnlock); + + sc->sc_sessions = (struct pasemi_session **) + kzalloc(PASEMI_INITIAL_SESSIONS * + sizeof(struct pasemi_session *), GFP_ATOMIC); + if (sc->sc_sessions == NULL) { + ret = -ENOMEM; + goto out; + } + + sc->sc_nsessions = PASEMI_INITIAL_SESSIONS; + sc->sc_lastchn = 0; + sc->base_irq = pdev->irq + 6; + sc->base_chan = 6; + sc->sc_cid = -1; + sc->dma_pdev = pdev; + + sc->iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL); + if (!sc->iob_pdev) { + dev_err(&pdev->dev, "Can't find I/O Bridge\n"); + ret = -ENODEV; + goto out; + } + + /* This is hardcoded and ugly, but we have some firmware versions + * who don't provide the register space in the device tree. Luckily + * they are at well-known locations so we can just do the math here. 
+ */ + sc->dma_regs = + ioremap(0xe0000000 + (sc->dma_pdev->devfn << 12), 0x2000); + sc->iob_regs = + ioremap(0xe0000000 + (sc->iob_pdev->devfn << 12), 0x2000); + if (!sc->dma_regs || !sc->iob_regs) { + dev_err(&pdev->dev, "Can't map registers\n"); + ret = -ENODEV; + goto out; + } + + dma_status = __ioremap(0xfd800000, 0x1000, 0); + if (!dma_status) { + ret = -ENODEV; + dev_err(&pdev->dev, "Can't map dmastatus space\n"); + goto out; + } + + sc->tx = (struct pasemi_fnu_txring *) + kzalloc(sizeof(struct pasemi_fnu_txring) + * 8, GFP_KERNEL); + if (!sc->tx) { + ret = -ENOMEM; + goto out; + } + + /* Initialize the h/w */ + out_le32(sc->dma_regs + PAS_DMA_COM_CFG, + (in_le32(sc->dma_regs + PAS_DMA_COM_CFG) | + PAS_DMA_COM_CFG_FWF)); + out_le32(sc->dma_regs + PAS_DMA_COM_TXCMD, PAS_DMA_COM_TXCMD_EN); + + for (i = 0; i < PASEMI_FNU_CHANNELS; i++) { + sc->sc_num_channels++; + ret = pasemi_dma_setup_tx_resources(sc, i); + if (ret) + goto out; + } + + sc->sc_cid = crypto_get_driverid(softc_get_device(sc), + CRYPTOCAP_F_HARDWARE); + if (sc->sc_cid < 0) { + printk(KERN_ERR DRV_NAME ": could not get crypto driver id\n"); + ret = -ENXIO; + goto out; + } + + /* register algorithms with the framework */ + printk(DRV_NAME ":"); + + crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0); + crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0); + crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0); + crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0); + crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0); + crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0); + crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0); + crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0); + + return 0; + +out: + pasemi_dma_remove(pdev); + return ret; +} + +#define MAX_RETRIES 5000 + +static void pasemi_free_tx_resources(struct pasemi_softc *sc, int chan) +{ + struct pasemi_fnu_txring *ring = &sc->tx[chan]; + int chan_index = chan + sc->base_chan; + int retries; + u32 stat; + + /* Stop the channel */ + out_le32(sc->dma_regs + + PAS_DMA_TXCHAN_TCMDSTA(chan_index), + PAS_DMA_TXCHAN_TCMDSTA_ST); + + for (retries = 0; retries < MAX_RETRIES; retries++) { + stat = in_le32(sc->dma_regs + + PAS_DMA_TXCHAN_TCMDSTA(chan_index)); + if (!(stat & PAS_DMA_TXCHAN_TCMDSTA_ACT)) + break; + cond_resched(); + } + + if (stat & PAS_DMA_TXCHAN_TCMDSTA_ACT) + dev_err(&sc->dma_pdev->dev, "Failed to stop tx channel %d\n", + chan_index); + + /* Disable the channel */ + out_le32(sc->dma_regs + + PAS_DMA_TXCHAN_TCMDSTA(chan_index), + 0); + + if (ring->desc_info) + kfree((void *) ring->desc_info); + if (ring->desc) + dma_free_coherent(&sc->dma_pdev->dev, + TX_RING_SIZE * + 2 * sizeof(u64), + (void *) ring->desc, ring->dma); + if (ring->irq != -1) + free_irq(ring->irq, sc); + + del_timer(&ring->crypto_timer); +} + +static void __devexit pasemi_dma_remove(struct pci_dev *pdev) +{ + struct pasemi_softc *sc = pci_get_drvdata(pdev); + int i; + + DPRINTF("%s()\n", __FUNCTION__); + + if (sc->sc_cid >= 0) { + crypto_unregister_all(sc->sc_cid); + } + + if (sc->tx) { + for (i = 0; i < sc->sc_num_channels; i++) + pasemi_free_tx_resources(sc, i); + + kfree(sc->tx); + } + if (sc->sc_sessions) { + for (i = 0; i < sc->sc_nsessions; i++) + kfree(sc->sc_sessions[i]); + kfree(sc->sc_sessions); + } + if (sc->iob_pdev) + pci_dev_put(sc->iob_pdev); + if (sc->dma_regs) + iounmap(sc->dma_regs); + if (sc->iob_regs) + iounmap(sc->iob_regs); + kfree(sc); +} + +static struct pci_device_id pasemi_dma_pci_tbl[] = { + { PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa007) }, +}; + +MODULE_DEVICE_TABLE(pci, 
pasemi_dma_pci_tbl); + +static struct pci_driver pasemi_dma_driver = { + .name = "pasemi_dma", + .id_table = pasemi_dma_pci_tbl, + .probe = pasemi_dma_probe, + .remove = __devexit_p(pasemi_dma_remove), +}; + +static void __exit pasemi_dma_cleanup_module(void) +{ + pci_unregister_driver(&pasemi_dma_driver); + __iounmap(dma_status); + dma_status = NULL; +} + +int pasemi_dma_init_module(void) +{ + return pci_register_driver(&pasemi_dma_driver); +} + +module_init(pasemi_dma_init_module); +module_exit(pasemi_dma_cleanup_module); + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Egor Martovetsky egor@pasemi.com"); +MODULE_DESCRIPTION("OCF driver for PA Semi PWRficient DMA Crypto Engine"); diff --git a/target/linux/generic-2.4/files/crypto/ocf/pasemi/pasemi_fnu.h b/target/linux/generic-2.4/files/crypto/ocf/pasemi/pasemi_fnu.h new file mode 100644 index 0000000000..1a0dcc8bbd --- /dev/null +++ b/target/linux/generic-2.4/files/crypto/ocf/pasemi/pasemi_fnu.h @@ -0,0 +1,410 @@ +/* + * Copyright (C) 2007 PA Semi, Inc + * + * Driver for the PA Semi PWRficient DMA Crypto Engine, soft state and + * hardware register layouts. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef PASEMI_FNU_H +#define PASEMI_FNU_H + +#include <linux/spinlock.h> + +#define PASEMI_SESSION(sid) ((sid) & 0xffffffff) +#define PASEMI_SID(sesn) ((sesn) & 0xffffffff) +#define DPRINTF(a...) if (debug) { printk(DRV_NAME ": " a); } + +/* Must be a power of two */ +#define RX_RING_SIZE 512 +#define TX_RING_SIZE 512 +#define TX_DESC(ring, num) ((ring)->desc[2 * (num & (TX_RING_SIZE-1))]) +#define TX_DESC_INFO(ring, num) ((ring)->desc_info[(num) & (TX_RING_SIZE-1)]) +#define MAX_DESC_SIZE 8 +#define PASEMI_INITIAL_SESSIONS 10 +#define PASEMI_FNU_CHANNELS 8 + +/* DMA descriptor */ +struct pasemi_desc { + u64 quad[2*MAX_DESC_SIZE]; + int quad_cnt; + int size; + int postop; +}; + +/* + * Holds per descriptor data + */ +struct pasemi_desc_info { + int desc_size; + int desc_postop; +#define PASEMI_CHECK_SIG 0x1 + + struct cryptop *cf_crp; +}; + +/* + * Holds per channel data + */ +struct pasemi_fnu_txring { + volatile u64 *desc; + volatile struct + pasemi_desc_info *desc_info; + dma_addr_t dma; + struct timer_list crypto_timer; + spinlock_t fill_lock; + spinlock_t clean_lock; + unsigned int next_to_fill; + unsigned int next_to_clean; + u16 total_pktcnt; + int irq; + int sesn; + char irq_name[10]; +}; + +/* + * Holds data specific to a single pasemi device. 
+ */ +struct pasemi_softc { + softc_device_decl sc_cdev; + struct pci_dev *dma_pdev; /* device backpointer */ + struct pci_dev *iob_pdev; /* device backpointer */ + void __iomem *dma_regs; + void __iomem *iob_regs; + int base_irq; + int base_chan; + int32_t sc_cid; /* crypto tag */ + int sc_nsessions; + struct pasemi_session **sc_sessions; + int sc_num_channels;/* number of crypto channels */ + + /* pointer to the array of txring datastructures, one txring per channel */ + struct pasemi_fnu_txring *tx; + + /* + * mutual exclusion for the channel scheduler + */ + spinlock_t sc_chnlock; + /* last channel used, for now use round-robin to allocate channels */ + int sc_lastchn; +}; + +struct pasemi_session { + u64 civ[2]; + u64 keysz; + u64 key[4]; + u64 ccmd; + u64 hkey[4]; + u64 hseq; + u64 giv[2]; + u64 hiv[4]; + + int used; + dma_addr_t dma_addr; + int chan; +}; + +/* status register layout in IOB region, at 0xfd800000 */ +struct pasdma_status { + u64 rx_sta[64]; + u64 tx_sta[20]; +}; + +#define ALG_IS_CIPHER(alg) ((alg == CRYPTO_DES_CBC) || \ + (alg == CRYPTO_3DES_CBC) || \ + (alg == CRYPTO_AES_CBC) || \ + (alg == CRYPTO_ARC4) || \ + (alg == CRYPTO_NULL_CBC)) + +#define ALG_IS_SIG(alg) ((alg == CRYPTO_MD5) || \ + (alg == CRYPTO_MD5_HMAC) || \ + (alg == CRYPTO_SHA1) || \ + (alg == CRYPTO_SHA1_HMAC) || \ + (alg == CRYPTO_NULL_HMAC)) + +enum { + PAS_DMA_COM_TXCMD = 0x100, /* Transmit Command Register */ + PAS_DMA_COM_TXSTA = 0x104, /* Transmit Status Register */ + PAS_DMA_COM_RXCMD = 0x108, /* Receive Command Register */ + PAS_DMA_COM_RXSTA = 0x10c, /* Receive Status Register */ + PAS_DMA_COM_CFG = 0x114, /* DMA Configuration Register */ +}; + +/* All these registers live in the PCI configuration space for the DMA PCI + * device. Use the normal PCI config access functions for them. 
+ */ + +#define PAS_DMA_COM_CFG_FWF 0x18000000 + +#define PAS_DMA_COM_TXCMD_EN 0x00000001 /* enable */ +#define PAS_DMA_COM_TXSTA_ACT 0x00000001 /* active */ +#define PAS_DMA_COM_RXCMD_EN 0x00000001 /* enable */ +#define PAS_DMA_COM_RXSTA_ACT 0x00000001 /* active */ + +#define _PAS_DMA_TXCHAN_STRIDE 0x20 /* Size per channel */ +#define _PAS_DMA_TXCHAN_TCMDSTA 0x300 /* Command / Status */ +#define _PAS_DMA_TXCHAN_CFG 0x304 /* Configuration */ +#define _PAS_DMA_TXCHAN_DSCRBU 0x308 /* Descriptor BU Allocation */ +#define _PAS_DMA_TXCHAN_INCR 0x310 /* Descriptor increment */ +#define _PAS_DMA_TXCHAN_CNT 0x314 /* Descriptor count/offset */ +#define _PAS_DMA_TXCHAN_BASEL 0x318 /* Descriptor ring base (low) */ +#define _PAS_DMA_TXCHAN_BASEU 0x31c /* (high) */ +#define PAS_DMA_TXCHAN_TCMDSTA(c) (0x300+(c)*_PAS_DMA_TXCHAN_STRIDE) +#define PAS_DMA_TXCHAN_TCMDSTA_EN 0x00000001 /* Enabled */ +#define PAS_DMA_TXCHAN_TCMDSTA_ST 0x00000002 /* Stop interface */ +#define PAS_DMA_TXCHAN_TCMDSTA_ACT 0x00010000 /* Active */ +#define PAS_DMA_TXCHAN_CFG(c) (0x304+(c)*_PAS_DMA_TXCHAN_STRIDE) +#define PAS_DMA_TXCHAN_CFG_TY_FUNC 0x00000002 /* Type = interface */ +#define PAS_DMA_TXCHAN_CFG_TY_IFACE 0x00000000 /* Type = interface */ +#define PAS_DMA_TXCHAN_CFG_TATTR_M 0x0000003c +#define PAS_DMA_TXCHAN_CFG_TATTR_S 2 +#define PAS_DMA_TXCHAN_CFG_TATTR(x) (((x) << PAS_DMA_TXCHAN_CFG_TATTR_S) & \ + PAS_DMA_TXCHAN_CFG_TATTR_M) +#define PAS_DMA_TXCHAN_CFG_WT_M 0x000001c0 +#define PAS_DMA_TXCHAN_CFG_WT_S 6 +#define PAS_DMA_TXCHAN_CFG_WT(x) (((x) << PAS_DMA_TXCHAN_CFG_WT_S) & \ + PAS_DMA_TXCHAN_CFG_WT_M) +#define PAS_DMA_TXCHAN_CFG_LPSQ_FAST 0x00000400 +#define PAS_DMA_TXCHAN_CFG_LPDQ_FAST 0x00000800 +#define PAS_DMA_TXCHAN_CFG_CF 0x00001000 /* Clean first line */ +#define PAS_DMA_TXCHAN_CFG_CL 0x00002000 /* Clean last line */ +#define PAS_DMA_TXCHAN_CFG_UP 0x00004000 /* update tx descr when sent */ +#define PAS_DMA_TXCHAN_INCR(c) (0x310+(c)*_PAS_DMA_TXCHAN_STRIDE) +#define PAS_DMA_TXCHAN_BASEL(c) (0x318+(c)*_PAS_DMA_TXCHAN_STRIDE) +#define PAS_DMA_TXCHAN_BASEL_BRBL_M 0xffffffc0 +#define PAS_DMA_TXCHAN_BASEL_BRBL_S 0 +#define PAS_DMA_TXCHAN_BASEL_BRBL(x) (((x) << PAS_DMA_TXCHAN_BASEL_BRBL_S) & \ + PAS_DMA_TXCHAN_BASEL_BRBL_M) +#define PAS_DMA_TXCHAN_BASEU(c) (0x31c+(c)*_PAS_DMA_TXCHAN_STRIDE) +#define PAS_DMA_TXCHAN_BASEU_BRBH_M 0x00000fff +#define PAS_DMA_TXCHAN_BASEU_BRBH_S 0 +#define PAS_DMA_TXCHAN_BASEU_BRBH(x) (((x) << PAS_DMA_TXCHAN_BASEU_BRBH_S) & \ + PAS_DMA_TXCHAN_BASEU_BRBH_M) +/* # of cache lines worth of buffer ring */ +#define PAS_DMA_TXCHAN_BASEU_SIZ_M 0x3fff0000 +#define PAS_DMA_TXCHAN_BASEU_SIZ_S 16 /* 0 = 16K */ +#define PAS_DMA_TXCHAN_BASEU_SIZ(x) (((x) << PAS_DMA_TXCHAN_BASEU_SIZ_S) & \ + PAS_DMA_TXCHAN_BASEU_SIZ_M) + +#define PAS_STATUS_PCNT_M 0x000000000000ffffull +#define PAS_STATUS_PCNT_S 0 +#define PAS_STATUS_DCNT_M 0x00000000ffff0000ull +#define PAS_STATUS_DCNT_S 16 +#define PAS_STATUS_BPCNT_M 0x0000ffff00000000ull +#define PAS_STATUS_BPCNT_S 32 +#define PAS_STATUS_CAUSE_M 0xf000000000000000ull +#define PAS_STATUS_TIMER 0x1000000000000000ull +#define PAS_STATUS_ERROR 0x2000000000000000ull +#define PAS_STATUS_SOFT 0x4000000000000000ull +#define PAS_STATUS_INT 0x8000000000000000ull + +#define PAS_IOB_DMA_RXCH_CFG(i) (0x1100 + (i)*4) +#define PAS_IOB_DMA_RXCH_CFG_CNTTH_M 0x00000fff +#define PAS_IOB_DMA_RXCH_CFG_CNTTH_S 0 +#define PAS_IOB_DMA_RXCH_CFG_CNTTH(x) (((x) << PAS_IOB_DMA_RXCH_CFG_CNTTH_S) & \ + PAS_IOB_DMA_RXCH_CFG_CNTTH_M) +#define PAS_IOB_DMA_TXCH_CFG(i) (0x1200 + (i)*4) +#define 
PAS_IOB_DMA_TXCH_CFG_CNTTH_M 0x00000fff +#define PAS_IOB_DMA_TXCH_CFG_CNTTH_S 0 +#define PAS_IOB_DMA_TXCH_CFG_CNTTH(x) (((x) << PAS_IOB_DMA_TXCH_CFG_CNTTH_S) & \ + PAS_IOB_DMA_TXCH_CFG_CNTTH_M) +#define PAS_IOB_DMA_RXCH_STAT(i) (0x1300 + (i)*4) +#define PAS_IOB_DMA_RXCH_STAT_INTGEN 0x00001000 +#define PAS_IOB_DMA_RXCH_STAT_CNTDEL_M 0x00000fff +#define PAS_IOB_DMA_RXCH_STAT_CNTDEL_S 0 +#define PAS_IOB_DMA_RXCH_STAT_CNTDEL(x) (((x) << PAS_IOB_DMA_RXCH_STAT_CNTDEL_S) &\ + PAS_IOB_DMA_RXCH_STAT_CNTDEL_M) +#define PAS_IOB_DMA_TXCH_STAT(i) (0x1400 + (i)*4) +#define PAS_IOB_DMA_TXCH_STAT_INTGEN 0x00001000 +#define PAS_IOB_DMA_TXCH_STAT_CNTDEL_M 0x00000fff +#define PAS_IOB_DMA_TXCH_STAT_CNTDEL_S 0 +#define PAS_IOB_DMA_TXCH_STAT_CNTDEL(x) (((x) << PAS_IOB_DMA_TXCH_STAT_CNTDEL_S) &\ + PAS_IOB_DMA_TXCH_STAT_CNTDEL_M) +#define PAS_IOB_DMA_RXCH_RESET(i) (0x1500 + (i)*4) +#define PAS_IOB_DMA_RXCH_RESET_PCNT_M 0xffff0000 +#define PAS_IOB_DMA_RXCH_RESET_PCNT_S 16 +#define PAS_IOB_DMA_RXCH_RESET_PCNT(x) (((x) << PAS_IOB_DMA_RXCH_RESET_PCNT_S) & \ + PAS_IOB_DMA_RXCH_RESET_PCNT_M) +#define PAS_IOB_DMA_RXCH_RESET_PCNTRST 0x00000020 +#define PAS_IOB_DMA_RXCH_RESET_DCNTRST 0x00000010 +#define PAS_IOB_DMA_RXCH_RESET_TINTC 0x00000008 +#define PAS_IOB_DMA_RXCH_RESET_DINTC 0x00000004 +#define PAS_IOB_DMA_RXCH_RESET_SINTC 0x00000002 +#define PAS_IOB_DMA_RXCH_RESET_PINTC 0x00000001 +#define PAS_IOB_DMA_TXCH_RESET(i) (0x1600 + (i)*4) +#define PAS_IOB_DMA_TXCH_RESET_PCNT_M 0xffff0000 +#define PAS_IOB_DMA_TXCH_RESET_PCNT_S 16 +#define PAS_IOB_DMA_TXCH_RESET_PCNT(x) (((x) << PAS_IOB_DMA_TXCH_RESET_PCNT_S) & \ + PAS_IOB_DMA_TXCH_RESET_PCNT_M) +#define PAS_IOB_DMA_TXCH_RESET_PCNTRST 0x00000020 +#define PAS_IOB_DMA_TXCH_RESET_DCNTRST 0x00000010 +#define PAS_IOB_DMA_TXCH_RESET_TINTC 0x00000008 +#define PAS_IOB_DMA_TXCH_RESET_DINTC 0x00000004 +#define PAS_IOB_DMA_TXCH_RESET_SINTC 0x00000002 +#define PAS_IOB_DMA_TXCH_RESET_PINTC 0x00000001 + +#define PAS_IOB_DMA_COM_TIMEOUTCFG 0x1700 +#define PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_M 0x00ffffff +#define PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_S 0 +#define PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(x) (((x) << PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_S) & \ + PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_M) + +/* Transmit descriptor fields */ +#define XCT_MACTX_T 0x8000000000000000ull +#define XCT_MACTX_ST 0x4000000000000000ull +#define XCT_MACTX_NORES 0x0000000000000000ull +#define XCT_MACTX_8BRES 0x1000000000000000ull +#define XCT_MACTX_24BRES 0x2000000000000000ull +#define XCT_MACTX_40BRES 0x3000000000000000ull +#define XCT_MACTX_I 0x0800000000000000ull +#define XCT_MACTX_O 0x0400000000000000ull +#define XCT_MACTX_E 0x0200000000000000ull +#define XCT_MACTX_VLAN_M 0x0180000000000000ull +#define XCT_MACTX_VLAN_NOP 0x0000000000000000ull +#define XCT_MACTX_VLAN_REMOVE 0x0080000000000000ull +#define XCT_MACTX_VLAN_INSERT 0x0100000000000000ull +#define XCT_MACTX_VLAN_REPLACE 0x0180000000000000ull +#define XCT_MACTX_CRC_M 0x0060000000000000ull +#define XCT_MACTX_CRC_NOP 0x0000000000000000ull +#define XCT_MACTX_CRC_INSERT 0x0020000000000000ull +#define XCT_MACTX_CRC_PAD 0x0040000000000000ull +#define XCT_MACTX_CRC_REPLACE 0x0060000000000000ull +#define XCT_MACTX_SS 0x0010000000000000ull +#define XCT_MACTX_LLEN_M 0x00007fff00000000ull +#define XCT_MACTX_LLEN_S 32ull +#define XCT_MACTX_LLEN(x) ((((long)(x)) << XCT_MACTX_LLEN_S) & \ + XCT_MACTX_LLEN_M) +#define XCT_MACTX_IPH_M 0x00000000f8000000ull +#define XCT_MACTX_IPH_S 27ull +#define XCT_MACTX_IPH(x) ((((long)(x)) << XCT_MACTX_IPH_S) & \ + XCT_MACTX_IPH_M) +#define XCT_MACTX_IPO_M 
0x0000000007c00000ull +#define XCT_MACTX_IPO_S 22ull +#define XCT_MACTX_IPO(x) ((((long)(x)) << XCT_MACTX_IPO_S) & \ + XCT_MACTX_IPO_M) +#define XCT_MACTX_CSUM_M 0x0000000000000060ull +#define XCT_MACTX_CSUM_NOP 0x0000000000000000ull +#define XCT_MACTX_CSUM_TCP 0x0000000000000040ull +#define XCT_MACTX_CSUM_UDP 0x0000000000000060ull +#define XCT_MACTX_V6 0x0000000000000010ull +#define XCT_MACTX_C 0x0000000000000004ull +#define XCT_MACTX_AL2 0x0000000000000002ull + +#define XCT_PTR_T 0x8000000000000000ull +#define XCT_PTR_LEN_M 0x7ffff00000000000ull +#define XCT_PTR_LEN_S 44 +#define XCT_PTR_LEN(x) ((((long)(x)) << XCT_PTR_LEN_S) & \ + XCT_PTR_LEN_M) +#define XCT_PTR_ADDR_M 0x00000fffffffffffull +#define XCT_PTR_ADDR_S 0 +#define XCT_PTR_ADDR(x) ((((long)(x)) << XCT_PTR_ADDR_S) & \ + XCT_PTR_ADDR_M) + +/* Function descriptor fields */ +#define XCT_FUN_T 0x8000000000000000ull +#define XCT_FUN_ST 0x4000000000000000ull +#define XCT_FUN_NORES 0x0000000000000000ull +#define XCT_FUN_8BRES 0x1000000000000000ull +#define XCT_FUN_24BRES 0x2000000000000000ull +#define XCT_FUN_40BRES 0x3000000000000000ull +#define XCT_FUN_I 0x0800000000000000ull +#define XCT_FUN_O 0x0400000000000000ull +#define XCT_FUN_E 0x0200000000000000ull +#define XCT_FUN_FUN_S 54 +#define XCT_FUN_FUN_M 0x01c0000000000000ull +#define XCT_FUN_FUN(num) ((((long)(num)) << XCT_FUN_FUN_S) & \ + XCT_FUN_FUN_M) +#define XCT_FUN_CRM_NOP 0x0000000000000000ull +#define XCT_FUN_CRM_SIG 0x0008000000000000ull +#define XCT_FUN_CRM_ENC 0x0010000000000000ull +#define XCT_FUN_CRM_DEC 0x0018000000000000ull +#define XCT_FUN_CRM_SIG_ENC 0x0020000000000000ull +#define XCT_FUN_CRM_ENC_SIG 0x0028000000000000ull +#define XCT_FUN_CRM_SIG_DEC 0x0030000000000000ull +#define XCT_FUN_CRM_DEC_SIG 0x0038000000000000ull +#define XCT_FUN_LLEN_M 0x0007ffff00000000ull +#define XCT_FUN_LLEN_S 32ULL +#define XCT_FUN_LLEN(x) ((((long)(x)) << XCT_FUN_LLEN_S) & \ + XCT_FUN_LLEN_M) +#define XCT_FUN_SHL_M 0x00000000f8000000ull +#define XCT_FUN_SHL_S 27ull +#define XCT_FUN_SHL(x) ((((long)(x)) << XCT_FUN_SHL_S) & \ + XCT_FUN_SHL_M) +#define XCT_FUN_CHL_M 0x0000000007c00000ull +#define XCT_FUN_CHL_S 22ull +#define XCT_FUN_CHL(x) ((((long)(x)) << XCT_FUN_CHL_S) & \ + XCT_FUN_CHL_M) +#define XCT_FUN_HSZ_M 0x00000000003c0000ull +#define XCT_FUN_HSZ_S 18ull +#define XCT_FUN_HSZ(x) ((((long)(x)) << XCT_FUN_HSZ_S) & \ + XCT_FUN_HSZ_M) +#define XCT_FUN_ALG_DES 0x0000000000000000ull +#define XCT_FUN_ALG_3DES 0x0000000000008000ull +#define XCT_FUN_ALG_AES 0x0000000000010000ull +#define XCT_FUN_ALG_ARC 0x0000000000018000ull +#define XCT_FUN_ALG_KASUMI 0x0000000000020000ull +#define XCT_FUN_BCM_ECB 0x0000000000000000ull +#define XCT_FUN_BCM_CBC 0x0000000000001000ull +#define XCT_FUN_BCM_CFB 0x0000000000002000ull +#define XCT_FUN_BCM_OFB 0x0000000000003000ull +#define XCT_FUN_BCM_CNT 0x0000000000003800ull +#define XCT_FUN_BCM_KAS_F8 0x0000000000002800ull +#define XCT_FUN_BCM_KAS_F9 0x0000000000001800ull +#define XCT_FUN_BCP_NO_PAD 0x0000000000000000ull +#define XCT_FUN_BCP_ZRO 0x0000000000000200ull +#define XCT_FUN_BCP_PL 0x0000000000000400ull +#define XCT_FUN_BCP_INCR 0x0000000000000600ull +#define XCT_FUN_SIG_MD5 (0ull << 4) +#define XCT_FUN_SIG_SHA1 (2ull << 4) +#define XCT_FUN_SIG_HMAC_MD5 (8ull << 4) +#define XCT_FUN_SIG_HMAC_SHA1 (10ull << 4) +#define XCT_FUN_A 0x0000000000000008ull +#define XCT_FUN_C 0x0000000000000004ull +#define XCT_FUN_AL2 0x0000000000000002ull +#define XCT_FUN_SE 0x0000000000000001ull + +#define XCT_FUN_SRC_PTR(len, addr) (XCT_PTR_LEN(len) | 
XCT_PTR_ADDR(addr)) +#define XCT_FUN_DST_PTR(len, addr) (XCT_FUN_SRC_PTR(len, addr) | \ + 0x8000000000000000ull) + +#define XCT_CTRL_HDR_FUN_NUM_M 0x01c0000000000000ull +#define XCT_CTRL_HDR_FUN_NUM_S 54 +#define XCT_CTRL_HDR_LEN_M 0x0007ffff00000000ull +#define XCT_CTRL_HDR_LEN_S 32 +#define XCT_CTRL_HDR_REG_M 0x00000000000000ffull +#define XCT_CTRL_HDR_REG_S 0 + +#define XCT_CTRL_HDR(funcN,len,reg) (0x9400000000000000ull | \ + ((((long)(funcN)) << XCT_CTRL_HDR_FUN_NUM_S) \ + & XCT_CTRL_HDR_FUN_NUM_M) | \ + ((((long)(len)) << \ + XCT_CTRL_HDR_LEN_S) & XCT_CTRL_HDR_LEN_M) | \ + ((((long)(reg)) << \ + XCT_CTRL_HDR_REG_S) & XCT_CTRL_HDR_REG_M)) + +/* Function config command options */ +#define DMA_CALGO_DES 0x00 +#define DMA_CALGO_3DES 0x01 +#define DMA_CALGO_AES 0x02 +#define DMA_CALGO_ARC 0x03 + +#define DMA_FN_CIV0 0x02 +#define DMA_FN_CIV1 0x03 +#define DMA_FN_HKEY0 0x0a + +#define XCT_PTR_ADDR_LEN(ptr) ((ptr) & XCT_PTR_ADDR_M), \ + (((ptr) & XCT_PTR_LEN_M) >> XCT_PTR_LEN_S) + +#endif /* PASEMI_FNU_H */ |
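For readers following the descriptor format, below is a minimal, illustrative sketch (not part of the patch) of how a work-descriptor header quad is composed from the XCT_FUN_* macros defined in pasemi_fnu.h, mirroring the pasemi_desc_start()/pasemi_desc_hdr() sequence that pasemi_process() performs for an encrypt-then-authenticate request. The example_fun_hdr() helper, the channel number, and the lengths are hypothetical; the include order follows pasemi.c and assumes the OCF build environment this patch targets.

```c
/* Illustrative only: build the function-descriptor header quad the way
 * pasemi_process() does for AES-CBC encrypt followed by HMAC-SHA1 on a
 * given channel. Include order mirrors pasemi.c (cryptodev.h provides the
 * OCF compatibility definitions pasemi_fnu.h relies on). */
#include <cryptodev.h>
#include "pasemi_fnu.h"

static u64 example_fun_hdr(int chan, u32 srclen, u32 hash_inject)
{
	u64 hdr;

	/* pasemi_desc_start(): output and input pointers follow, encrypt
	 * then sign, authenticate, route to function unit 'chan' */
	hdr = XCT_FUN_O | XCT_FUN_I | XCT_FUN_CRM_ENC_SIG |
	      XCT_FUN_A | XCT_FUN_FUN(chan);

	/* pasemi_desc_hdr(): cipher algorithm and block-cipher mode */
	hdr |= XCT_FUN_ALG_AES | XCT_FUN_BCM_CBC;

	/* hash algorithm and hash size in 32-bit words, as computed from
	 * crp_ilen - crd_inject in pasemi_process() */
	hdr |= XCT_FUN_SIG_HMAC_SHA1 | XCT_FUN_HSZ((srclen - hash_inject) / 4);

	/* total source length for the operation */
	hdr |= XCT_FUN_LLEN(srclen);

	return hdr;
}
```

The resulting quad would then be followed by XCT_FUN_DST_PTR()/XCT_FUN_SRC_PTR() quads built with pasemi_desc_build() and pushed to the channel ring with pasemi_ring_add_desc() and pasemi_ring_incr(), as the driver does above.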