clean up jffs2 config options
[openwrt.git] / target / linux / etrax / patches / cris / 003-drivers-cris.patch
1 diff -urN linux-2.6.19.2.orig/drivers/ide/cris/ide-cris.c linux-2.6.19.2.dev/drivers/ide/cris/ide-cris.c
2 --- linux-2.6.19.2.orig/drivers/ide/cris/ide-cris.c     2007-01-10 20:10:37.000000000 +0100
3 +++ linux-2.6.19.2.dev/drivers/ide/cris/ide-cris.c      2006-12-06 14:17:02.000000000 +0100
4 @@ -1,8 +1,8 @@
5 -/* $Id: cris-ide-driver.patch,v 1.1 2005/06/29 21:39:07 akpm Exp $
6 +/* $Id: ide-cris.c,v 1.10 2006/12/06 13:17:02 starvik Exp $
7   *
8   * Etrax specific IDE functions, like init and PIO-mode setting etc.
9   * Almost the entire ide.c is used for the rest of the Etrax ATA driver.
10 - * Copyright (c) 2000-2005 Axis Communications AB
11 + * Copyright (c) 2000-2006 Axis Communications AB
12   *
13   * Authors:    Bjorn Wesen        (initial version)
14   *             Mikael Starvik     (crisv32 port)
15 @@ -43,8 +43,8 @@
16  
17  #define IDE_REGISTER_TIMEOUT 300
18  
19 -#define LOWDB(x)
20 -#define D(x)
21 +#define LOWDB(x) 
22 +#define D(x) 
23  
24  enum /* Transfer types */
25  {
26 @@ -88,12 +88,50 @@
27  #define ATA_PIO0_STROBE 39
28  #define ATA_PIO0_HOLD    9
29  
30 -int
31 +/*
32 + * On ETRAX FS, an interrupt remains latched and active until ack:ed.
33 + * Further, ATA acks are without effect as long as INTRQ is asserted, as the
34 + * corresponding ATA interrupt is continuously set to active.  There will be a
35 + * clearing ack at the usual cris_ide_ack_intr call, but that serves just to
36 + * gracefully handle an actual spurious interrupt or similar situation (which
37 + * will cause an early return without further actions, see the ide_intr
38 + * function).
39 + *
40 + * However, the normal case at time of this writing is that nothing has
41 + * changed from when INTRQ was asserted until the cris_ide_ack_intr call; no
42 + * ATA registers written and no status register read, so INTRQ will *remain*
43 + * asserted, thus *another* interrupt will be latched, and will be seen as a
44 + * spurious interrupt after the "real" interrupt is serviced.  With lots of
45 + * ATA traffic (as in a trivial file-copy between two drives), this will trigger
46 + * the condition desc->irqs_unhandled > 99900 in
47 + * kernel/irq/spurious.c:note_interrupt and the system will halt.
48 + *
49 + * To actually get rid of the interrupt corresponding to the current INTRQ
50 + * assertion, we make a second ack after the next ATA register read or write;
51 + * i.e. when INTRQ must be deasserted.  At that time, we don't have the hwif
52 + * pointer available, so we need to stash a local copy (safe, because it'll be
53 + * set and cleared within the same spin_lock_irqsave region).  The pointer
54 + * serves doubly as a boolean flag that an ack is needed.  The caller must
55 + * NULL the pointer after the "second ack".
56 + */
57 +
58 +static ide_hwif_t *hwif_to_ack;
59 +
60 +static int
61  cris_ide_ack_intr(ide_hwif_t* hwif)
62  {
63 -       reg_ata_rw_ctrl2 ctrl2 = REG_TYPE_CONV(reg_ata_rw_ctrl2,
64 +       /*
65 +        * The interrupt is shared so we need to find the interface bit number
66 +        * to ack.  We define the ATA I/O register addresses to have the
67 +        * format of ata rw_ctrl2 register contents, conveniently holding this
68 +        * number.
69 +        */
70 +       reg_ata_rw_ctrl2 ctrl2 = REG_TYPE_CONV(reg_ata_rw_ctrl2, 
71                                  int, hwif->io_ports[0]);
72         REG_WR_INT(ata, regi_ata, rw_ack_intr, 1 << ctrl2.sel);
73 +
74 +       /* Prepare to ack again, see above. */
75 +       hwif_to_ack = hwif;
76         return 1;
77  }
78  
79 @@ -122,8 +160,24 @@
80  
81  static void
82  cris_ide_write_command(unsigned long command)
83 -{
84 +{      
85         REG_WR_INT(ata, regi_ata, rw_ctrl2, command); /* write data to the drive's register */
86 +
87 +       /*
88 +       /*
89 +        * Perform a pending ack if needed; see hwif_to_ack definition.  Perhaps
90 +        * preparation to read the ATA status register or write to the ATA
91 +        * command register (causing deassert of INTRQ; see the ATA standard),
92 +        * but at time of this writing (and expected to sanely remain so), the
93 +        * first ATA register activity after a cris_ide_ack_intr call is
94 +        * certain to do exactly that.
95 +        */
96 +       if (hwif_to_ack) {
97 +               /* The drive may take this long to deassert INTRQ. */
98 +               ndelay(400);
99 +               cris_ide_ack_intr(hwif_to_ack);
100 +               hwif_to_ack = NULL;
101 +       }
102  }
103  
104  static void
105 @@ -160,8 +214,8 @@
106  {
107         reg_ata_rw_ctrl2 ctrl2 = {0};
108         ctrl2.addr = addr;
109 -       ctrl2.cs1 = cs1;
110 -       ctrl2.cs0 = cs0;
111 +       ctrl2.cs1 = !cs1;
112 +       ctrl2.cs0 = !cs0;
113         return REG_TYPE_CONV(int, reg_ata_rw_ctrl2, ctrl2);
114  }
115  
116 @@ -184,14 +238,14 @@
117  
118         intr_mask.bus0 = regk_ata_yes;
119         intr_mask.bus1 = regk_ata_yes;
120 -       intr_mask.bus2 = regk_ata_yes;
121 +       intr_mask.bus2 = regk_ata_yes;          
122         intr_mask.bus3 = regk_ata_yes;
123  
124         REG_WR(ata, regi_ata, rw_intr_mask, intr_mask);
125  
126         crisv32_request_dma(2, "ETRAX FS built-in ATA", DMA_VERBOSE_ON_ERROR, 0, dma_ata);
127         crisv32_request_dma(3, "ETRAX FS built-in ATA", DMA_VERBOSE_ON_ERROR, 0, dma_ata);
128 -
129 +        
130         crisv32_pinmux_alloc_fixed(pinmux_ata);
131         crisv32_pinmux_alloc_fixed(pinmux_ata0);
132         crisv32_pinmux_alloc_fixed(pinmux_ata1);
133 @@ -204,14 +258,15 @@
134         DMA_ENABLE(regi_dma3);
135  
136         DMA_WR_CMD (regi_dma2, regk_dma_set_w_size2);
137 -       DMA_WR_CMD (regi_dma3, regk_dma_set_w_size2);
138 +       DMA_WR_CMD (regi_dma3, regk_dma_set_w_size2);   
139  }
140  
141  static dma_descr_context mycontext __attribute__ ((__aligned__(32)));
142  
143  #define cris_dma_descr_type dma_descr_data
144 -#define cris_pio_read regk_ata_rd
145 -#define cris_ultra_mask 0x7
146 +#define cris_pio_read (regk_ata_rd << 24)
147 +#define cris_ultra_mask 0x0 /* 0x7 for UDMA */
148 +#define IRQ ATA_INTR_VECT
149  #define MAX_DESCR_SIZE 0xffffffffUL
150  
151  static unsigned long
152 @@ -226,6 +281,8 @@
153         d->buf = (char*)virt_to_phys(buf);
154         d->after = d->buf + len;
155         d->eol = last;
156 +       /* assume descriptors are consecutively placed in memory */
157 +       d->next = last ? 0 : (cris_dma_descr_type*)virt_to_phys(d+1);
158  }
159  
160  static void
161 @@ -237,8 +294,10 @@
162         mycontext.saved_data = (dma_descr_data*)virt_to_phys(d);
163         mycontext.saved_data_buf = d->buf;
164         /* start the dma channel */
165 +       if (dir)
166 +               flush_dma_context(&mycontext); // Cache bug workaround   
167         DMA_START_CONTEXT(dir ? regi_dma3 : regi_dma2, virt_to_phys(&mycontext));
168 -
169 +       
170         /* initiate a multi word dma read using PIO handshaking */
171         trf_cnt.cnt = len >> 1;
172         /* Due to a "feature" the transfer count has to be one extra word for UDMA. */
173 @@ -248,7 +307,7 @@
174  
175         ctrl2.rw = dir ? regk_ata_rd : regk_ata_wr;
176         ctrl2.trf_mode = regk_ata_dma;
177 -       ctrl2.hsh = type == TYPE_PIO ? regk_ata_pio :
178 +       ctrl2.hsh = type == TYPE_PIO ? regk_ata_pio : 
179                     type == TYPE_DMA ? regk_ata_dma : regk_ata_udma;
180         ctrl2.multi = regk_ata_yes;
181         ctrl2.dma_size = regk_ata_word;
182 @@ -339,7 +398,7 @@
183  #define ATA_PIO0_STROBE 19
184  #define ATA_PIO0_HOLD    4
185  
186 -int
187 +int 
188  cris_ide_ack_intr(ide_hwif_t* hwif)
189  {
190         return 1;
191 @@ -348,13 +407,13 @@
192  static inline int
193  cris_ide_busy(void)
194  {
195 -       return *R_ATA_STATUS_DATA & IO_MASK(R_ATA_STATUS_DATA, busy) ;
196 +       return *R_ATA_STATUS_DATA & IO_MASK(R_ATA_STATUS_DATA, busy) ; 
197  }
198  
199  static inline int
200  cris_ide_ready(void)
201  {
202 -       return *R_ATA_STATUS_DATA & IO_MASK(R_ATA_STATUS_DATA, tr_rdy) ;
203 +       return *R_ATA_STATUS_DATA & IO_MASK(R_ATA_STATUS_DATA, tr_rdy) ; 
204  }
205  
206  static inline int
207 @@ -364,12 +423,12 @@
208         *data = (unsigned short)status;
209         return status & IO_MASK(R_ATA_STATUS_DATA, dav);
210  }
211 -
212 +       
213  static void
214  cris_ide_write_command(unsigned long command)
215  {
216 -       *R_ATA_CTRL_DATA = command;
217 -}
218 +       *R_ATA_CTRL_DATA = command; 
219 +}              
220  
221  static void
222  cris_ide_set_speed(int type, int setup, int strobe, int hold)
223 @@ -406,8 +465,8 @@
224  cris_ide_reg_addr(unsigned long addr, int cs0, int cs1)
225  {
226         return IO_FIELD(R_ATA_CTRL_DATA, addr, addr) |
227 -              IO_FIELD(R_ATA_CTRL_DATA, cs0, cs0) |
228 -              IO_FIELD(R_ATA_CTRL_DATA, cs1, cs1);
229 +              IO_FIELD(R_ATA_CTRL_DATA, cs0, cs0 ? 0 : 1) |
230 +              IO_FIELD(R_ATA_CTRL_DATA, cs1, cs1 ? 0 : 1);
231  }
232  
233  static __init void
234 @@ -484,6 +543,7 @@
235  #define cris_dma_descr_type etrax_dma_descr
236  #define cris_pio_read IO_STATE(R_ATA_CTRL_DATA, rw, read)
237  #define cris_ultra_mask 0x0
238 +#define IRQ 4
239  #define MAX_DESCR_SIZE 0x10000UL
240  
241  static unsigned long
242 @@ -497,8 +557,8 @@
243  {
244         d->buf = virt_to_phys(buf);
245         d->sw_len = len == MAX_DESCR_SIZE ? 0 : len;
246 -       if (last)
247 -               d->ctrl |= d_eol;
248 +       d->ctrl = last ? d_eol : 0;
249 +       d->next = last ? 0 : virt_to_phys(d+1); /* assumes descr's in array */
250  }
251  
252  static void cris_ide_start_dma(ide_drive_t *drive, cris_dma_descr_type *d, int dir, int type, int len)
253 @@ -521,14 +581,14 @@
254                 *R_DMA_CH2_FIRST = virt_to_phys(d);
255                 *R_DMA_CH2_CMD   = IO_STATE(R_DMA_CH2_CMD, cmd, start);
256         }
257 -
258 +       
259         /* initiate a multi word dma read using DMA handshaking */
260  
261         *R_ATA_TRANSFER_CNT =
262                 IO_FIELD(R_ATA_TRANSFER_CNT, count, len >> 1);
263  
264         cmd = dir ? IO_STATE(R_ATA_CTRL_DATA, rw, read) : IO_STATE(R_ATA_CTRL_DATA, rw, write);
265 -       cmd |= type == TYPE_PIO ? IO_STATE(R_ATA_CTRL_DATA, handsh, pio) :
266 +       cmd |= type == TYPE_PIO ? IO_STATE(R_ATA_CTRL_DATA, handsh, pio) : 
267                                   IO_STATE(R_ATA_CTRL_DATA, handsh, dma);
268         *R_ATA_CTRL_DATA =
269                 cmd |
270 @@ -570,7 +630,7 @@
271  }
272  
273  #endif
274 -
275 +               
276  void
277  cris_ide_outw(unsigned short data, unsigned long reg) {
278         int timeleft;
279 @@ -597,7 +657,7 @@
280         if(!timeleft)
281                 printk("ATA timeout reg 0x%lx := 0x%x\n", reg, data);
282  
283 -       cris_ide_write_command(reg|data); /* write data to the drive's register */
284 +       cris_ide_write_command(reg|data); /* write data to the drive's register */ 
285  
286         timeleft = IDE_REGISTER_TIMEOUT;
287         /* wait for transmitter ready */
288 @@ -684,13 +744,15 @@
289  static void cris_atapi_output_bytes(ide_drive_t *drive, void *, unsigned int);
290  static int cris_dma_off (ide_drive_t *drive);
291  static int cris_dma_on (ide_drive_t *drive);
292 +static int cris_dma_host_off (ide_drive_t *drive);
293 +static int cris_dma_host_on (ide_drive_t *drive);
294  
295  static void tune_cris_ide(ide_drive_t *drive, u8 pio)
296  {
297         int setup, strobe, hold;
298  
299         switch(pio)
300 -       {
301 +       {       
302                 case 0:
303                         setup = ATA_PIO0_SETUP;
304                         strobe = ATA_PIO0_STROBE;
305 @@ -715,7 +777,7 @@
306                         setup = ATA_PIO4_SETUP;
307                         strobe = ATA_PIO4_STROBE;
308                         hold = ATA_PIO4_HOLD;
309 -                       break;
310 +                       break;    
311                 default:
312                         return;
313         }
314 @@ -733,7 +795,7 @@
315         }
316  
317         switch(speed)
318 -       {
319 +       {       
320                 case XFER_UDMA_0:
321                         cyc = ATA_UDMA0_CYC;
322                         dvs = ATA_UDMA0_DVS;
323 @@ -765,7 +827,7 @@
324         if (speed >= XFER_UDMA_0)
325                 cris_ide_set_speed(TYPE_UDMA, cyc, dvs, 0);
326         else
327 -               cris_ide_set_speed(TYPE_DMA, 0, strobe, hold);
328 +               cris_ide_set_speed(TYPE_DMA, 0, strobe, hold);  
329  
330         return 0;
331  }
332 @@ -790,11 +852,13 @@
333  
334         for(h = 0; h < MAX_HWIFS; h++) {
335                 ide_hwif_t *hwif = &ide_hwifs[h];
336 -               ide_setup_ports(&hw, cris_ide_base_address(h),
337 +               memset(&hw, 0, sizeof(hw));
338 +               ide_setup_ports(&hw, cris_ide_base_address(h), 
339                                 ide_offsets,
340                                 0, 0, cris_ide_ack_intr,
341 -                               ide_default_irq(0));
342 +                               IRQ);
343                 ide_register_hw(&hw, &hwif);
344 +               hwif->irq = IRQ;
345                 hwif->mmio = 2;
346                 hwif->chipset = ide_etrax100;
347                 hwif->tuneproc = &tune_cris_ide;
348 @@ -814,13 +878,15 @@
349                 hwif->OUTBSYNC = &cris_ide_outbsync;
350                 hwif->INB = &cris_ide_inb;
351                 hwif->INW = &cris_ide_inw;
352 -               hwif->ide_dma_host_off = &cris_dma_off;
353 -               hwif->ide_dma_host_on = &cris_dma_on;
354 +               hwif->ide_dma_host_off = &cris_dma_host_off;
355 +               hwif->ide_dma_host_on = &cris_dma_host_on;
356                 hwif->ide_dma_off_quietly = &cris_dma_off;
357 +               hwif->ide_dma_on = &cris_dma_on;
358                 hwif->udma_four = 0;
359                 hwif->ultra_mask = cris_ultra_mask;
360                 hwif->mwdma_mask = 0x07; /* Multiword DMA 0-2 */
361                 hwif->swdma_mask = 0x07; /* Singleword DMA 0-2 */
362 +               hwif->rqsize = 256;
363         }
364  
365         /* Reset pulse */
366 @@ -835,13 +901,25 @@
367         cris_ide_set_speed(TYPE_UDMA, ATA_UDMA2_CYC, ATA_UDMA2_DVS, 0);
368  }
369  
370 +static int cris_dma_host_off (ide_drive_t *drive)
371 +{
372 +       return 0;
373 +}
374 +
375 +static int cris_dma_host_on (ide_drive_t *drive)
376 +{
377 +       return 0;
378 +}
379 +
380  static int cris_dma_off (ide_drive_t *drive)
381  {
382 +       drive->using_dma = 0;
383         return 0;
384  }
385  
386  static int cris_dma_on (ide_drive_t *drive)
387  {
388 +       drive->using_dma = 1;
389         return 0;
390  }
391  
392 @@ -958,30 +1036,28 @@
393                         size += sg_dma_len(sg);
394                 }
395  
396 -               /* did we run out of descriptors? */
397 -
398 -               if(count >= MAX_DMA_DESCRS) {
399 -                       printk("%s: too few DMA descriptors\n", drive->name);
400 -                       return 1;
401 -               }
402 -
403 -               /* however, this case is more difficult - rw_trf_cnt cannot be more
404 -                  than 65536 words per transfer, so in that case we need to either
405 +               /* rw_trf_cnt cannot be more than 131072 words per transfer, 
406 +                  (- 1 word for UDMA CRC) so in that case we need to either:
407                    1) use a DMA interrupt to re-trigger rw_trf_cnt and continue with
408                       the descriptors, or
409                    2) simply do the request here, and get dma_intr to only ide_end_request on
410                       those blocks that were actually set-up for transfer.
411 +                     (The ide framework will issue a new request for the remainder)
412                 */
413  
414 -               if(ata_tot_size + size > 131072) {
415 +               if(ata_tot_size + size > 262140) {
416                         printk("too large total ATA DMA request, %d + %d!\n", ata_tot_size, (int)size);
417                         return 1;
418                 }
419  
420 -               /* If size > MAX_DESCR_SIZE it has to be splitted into new descriptors. Since we
421 -                   don't handle size > 131072 only one split is necessary */
422 +               /* If size > MAX_DESCR_SIZE it has to be split into new descriptors. */
423  
424 -               if(size > MAX_DESCR_SIZE) {
425 +               while (size > MAX_DESCR_SIZE) {
426 +                       /* did we run out of descriptors? */
427 +                       if(count >= MAX_DMA_DESCRS) {
428 +                               printk("%s: too few DMA descriptors\n", drive->name);
429 +                               return 1;
430 +                       }
431                         cris_ide_fill_descriptor(&ata_descrs[count], (void*)addr, MAX_DESCR_SIZE, 0);
432                         count++;
433                         ata_tot_size += MAX_DESCR_SIZE;
434 @@ -989,6 +1065,11 @@
435                         addr += MAX_DESCR_SIZE;
436                 }
437  
438 +               /* did we run out of descriptors? */
439 +               if(count >= MAX_DMA_DESCRS) {
440 +                       printk("%s: too few DMA descriptors\n", drive->name);
441 +                       return 1;
442 +               }
443                 cris_ide_fill_descriptor(&ata_descrs[count], (void*)addr, size,i ? 0 : 1);
444                 count++;
445                 ata_tot_size += size;
446 @@ -1050,8 +1131,12 @@
447  
448         if (id && (id->capability & 1)) {
449                 if (ide_use_dma(drive)) {
450 -                       if (cris_config_drive_for_dma(drive))
451 -                               return hwif->ide_dma_on(drive);
452 +                       if (cris_config_drive_for_dma(drive)) {
453 +                               if (hwif->ide_dma_on)
454 +                                       return hwif->ide_dma_on(drive);
455 +                               else
456 +                                       return 1;
457 +                       }
458                 }
459         }
460  
461 --- linux-2.6.19.2.orig/drivers/serial/crisv10.c        2007-01-10 20:10:37.000000000 +0100
462 +++ linux-2.6.19.2.dev/drivers/serial/crisv10.c 2007-01-09 10:30:54.000000000 +0100
463 @@ -2,7 +2,7 @@
464   *
465   * Serial port driver for the ETRAX 100LX chip
466   *
467 - *    Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003  Axis Communications AB
468 + *    Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2004  Axis Communications AB
469   *
470   *    Many, many authors. Based once upon a time on serial.c for 16x50.
471   *
472 @@ -445,6 +445,7 @@
473  
474  #include <asm/io.h>
475  #include <asm/irq.h>
476 +#include <asm/dma.h>
477  #include <asm/system.h>
478  #include <asm/bitops.h>
479  #include <linux/delay.h>
480 @@ -454,8 +455,9 @@
481  /* non-arch dependent serial structures are in linux/serial.h */
482  #include <linux/serial.h>
483  /* while we keep our own stuff (struct e100_serial) in a local .h file */
484 -#include "serial.h"
485 +#include "crisv10.h"
486  #include <asm/fasttimer.h>
487 +#include <asm/arch/io_interface_mux.h>
488  
489  #ifdef CONFIG_ETRAX_SERIAL_FAST_TIMER
490  #ifndef CONFIG_ETRAX_FAST_TIMER
491 @@ -586,11 +588,10 @@
492  static void change_speed(struct e100_serial *info);
493  static void rs_throttle(struct tty_struct * tty);
494  static void rs_wait_until_sent(struct tty_struct *tty, int timeout);
495 -static int rs_write(struct tty_struct * tty, int from_user,
496 +static int rs_write(struct tty_struct * tty,
497                      const unsigned char *buf, int count);
498  #ifdef CONFIG_ETRAX_RS485
499 -static int e100_write_rs485(struct tty_struct * tty, int from_user,
500 -                            const unsigned char *buf, int count);
501 +static int e100_write_rs485(struct tty_struct * tty, const unsigned char *buf, int count);
502  #endif
503  static int get_lsr_info(struct e100_serial * info, unsigned int *value);
504  
505 @@ -677,20 +678,39 @@
506           .rx_ctrl     = DEF_RX,
507           .tx_ctrl     = DEF_TX,
508           .iseteop     = 2,
509 +         .dma_owner   = dma_ser0,
510 +         .io_if       = if_serial_0,
511  #ifdef CONFIG_ETRAX_SERIAL_PORT0
512            .enabled  = 1,
513  #ifdef CONFIG_ETRAX_SERIAL_PORT0_DMA6_OUT
514           .dma_out_enabled = 1,
515 +         .dma_out_nbr = SER0_TX_DMA_NBR,
516 +         .dma_out_irq_nbr = SER0_DMA_TX_IRQ_NBR,
517 +         .dma_out_irq_flags = IRQF_DISABLED,
518 +         .dma_out_irq_description = "serial 0 dma tr",
519  #else
520           .dma_out_enabled = 0,
521 +         .dma_out_nbr = UINT_MAX,
522 +         .dma_out_irq_nbr = 0,
523 +         .dma_out_irq_flags = 0,
524 +         .dma_out_irq_description = NULL,
525  #endif
526  #ifdef CONFIG_ETRAX_SERIAL_PORT0_DMA7_IN
527           .dma_in_enabled = 1,
528 +         .dma_in_nbr = SER0_RX_DMA_NBR,
529 +         .dma_in_irq_nbr = SER0_DMA_RX_IRQ_NBR,
530 +         .dma_in_irq_flags = IRQF_DISABLED,
531 +         .dma_in_irq_description = "serial 0 dma rec",
532  #else
533 -         .dma_in_enabled = 0
534 +         .dma_in_enabled = 0,
535 +         .dma_in_nbr = UINT_MAX,
536 +         .dma_in_irq_nbr = 0,
537 +         .dma_in_irq_flags = 0,
538 +         .dma_in_irq_description = NULL,
539  #endif
540  #else
541            .enabled  = 0,
542 +         .io_if_description = NULL,
543           .dma_out_enabled = 0,
544           .dma_in_enabled = 0
545  #endif
546 @@ -712,20 +732,42 @@
547           .rx_ctrl     = DEF_RX,
548           .tx_ctrl     = DEF_TX,
549           .iseteop     = 3,
550 +         .dma_owner   = dma_ser1,
551 +         .io_if       = if_serial_1,
552  #ifdef CONFIG_ETRAX_SERIAL_PORT1
553            .enabled  = 1,
554 +         .io_if_description = "ser1",
555  #ifdef CONFIG_ETRAX_SERIAL_PORT1_DMA8_OUT
556           .dma_out_enabled = 1,
557 +         .dma_out_nbr = SER1_TX_DMA_NBR,
558 +         .dma_out_irq_nbr = SER1_DMA_TX_IRQ_NBR,
559 +         .dma_out_irq_flags = IRQF_DISABLED,
560 +         .dma_out_irq_description = "serial 1 dma tr",
561  #else
562           .dma_out_enabled = 0,
563 +         .dma_out_nbr = UINT_MAX,
564 +         .dma_out_irq_nbr = 0,
565 +         .dma_out_irq_flags = 0,
566 +         .dma_out_irq_description = NULL,
567  #endif
568  #ifdef CONFIG_ETRAX_SERIAL_PORT1_DMA9_IN
569           .dma_in_enabled = 1,
570 +         .dma_in_nbr = SER1_RX_DMA_NBR,
571 +         .dma_in_irq_nbr = SER1_DMA_RX_IRQ_NBR,
572 +         .dma_in_irq_flags = IRQF_DISABLED,
573 +         .dma_in_irq_description = "serial 1 dma rec",
574  #else
575 -         .dma_in_enabled = 0
576 +         .dma_in_enabled = 0,
577 +         .dma_in_enabled = 0,
578 +         .dma_in_nbr = UINT_MAX,
579 +         .dma_in_irq_nbr = 0,
580 +         .dma_in_irq_flags = 0,
581 +         .dma_in_irq_description = NULL,
582  #endif
583  #else
584            .enabled  = 0,
585 +         .io_if_description = NULL,
586 +         .dma_in_irq_nbr = 0,
587           .dma_out_enabled = 0,
588           .dma_in_enabled = 0
589  #endif
590 @@ -746,20 +788,40 @@
591           .rx_ctrl     = DEF_RX,
592           .tx_ctrl     = DEF_TX,
593           .iseteop     = 0,
594 +         .dma_owner   = dma_ser2,
595 +         .io_if       = if_serial_2,
596  #ifdef CONFIG_ETRAX_SERIAL_PORT2
597            .enabled  = 1,
598 +         .io_if_description = "ser2",
599  #ifdef CONFIG_ETRAX_SERIAL_PORT2_DMA2_OUT
600           .dma_out_enabled = 1,
601 +         .dma_out_nbr = SER2_TX_DMA_NBR,
602 +         .dma_out_irq_nbr = SER2_DMA_TX_IRQ_NBR,
603 +         .dma_out_irq_flags = IRQF_DISABLED,
604 +         .dma_out_irq_description = "serial 2 dma tr",
605  #else
606           .dma_out_enabled = 0,
607 +         .dma_out_nbr = UINT_MAX,
608 +         .dma_out_irq_nbr = 0,
609 +         .dma_out_irq_flags = 0,
610 +         .dma_out_irq_description = NULL,
611  #endif
612  #ifdef CONFIG_ETRAX_SERIAL_PORT2_DMA3_IN
613           .dma_in_enabled = 1,
614 +         .dma_in_nbr = SER2_RX_DMA_NBR,
615 +         .dma_in_irq_nbr = SER2_DMA_RX_IRQ_NBR,
616 +         .dma_in_irq_flags = IRQF_DISABLED,
617 +         .dma_in_irq_description = "serial 2 dma rec",
618  #else
619 -         .dma_in_enabled = 0
620 +         .dma_in_enabled = 0,
621 +         .dma_in_nbr = UINT_MAX,
622 +         .dma_in_irq_nbr = 0,
623 +         .dma_in_irq_flags = 0,
624 +         .dma_in_irq_description = NULL,
625  #endif
626  #else
627            .enabled  = 0,
628 +         .io_if_description = NULL,
629           .dma_out_enabled = 0,
630           .dma_in_enabled = 0
631  #endif
632 @@ -780,20 +842,40 @@
633           .rx_ctrl     = DEF_RX,
634           .tx_ctrl     = DEF_TX,
635           .iseteop     = 1,
636 +         .dma_owner   = dma_ser3,
637 +         .io_if       = if_serial_3,
638  #ifdef CONFIG_ETRAX_SERIAL_PORT3
639            .enabled  = 1,
640 +         .io_if_description = "ser3",
641  #ifdef CONFIG_ETRAX_SERIAL_PORT3_DMA4_OUT
642           .dma_out_enabled = 1,
643 +         .dma_out_nbr = SER3_TX_DMA_NBR,
644 +         .dma_out_irq_nbr = SER3_DMA_TX_IRQ_NBR,
645 +         .dma_out_irq_flags = IRQF_DISABLED,
646 +         .dma_out_irq_description = "serial 3 dma tr",
647  #else
648           .dma_out_enabled = 0,
649 +         .dma_out_nbr = UINT_MAX,
650 +         .dma_out_irq_nbr = 0,
651 +         .dma_out_irq_flags = 0,
652 +         .dma_out_irq_description = NULL,
653  #endif
654  #ifdef CONFIG_ETRAX_SERIAL_PORT3_DMA5_IN
655           .dma_in_enabled = 1,
656 +         .dma_in_nbr = SER3_RX_DMA_NBR,
657 +         .dma_in_irq_nbr = SER3_DMA_RX_IRQ_NBR,
658 +         .dma_in_irq_flags = IRQF_DISABLED,
659 +         .dma_in_irq_description = "serial 3 dma rec",
660  #else
661 -         .dma_in_enabled = 0
662 +         .dma_in_enabled = 0,
663 +         .dma_in_nbr = UINT_MAX,
664 +         .dma_in_irq_nbr = 0,
665 +         .dma_in_irq_flags = 0,
666 +         .dma_in_irq_description = NULL
667  #endif
668  #else
669            .enabled  = 0,
670 +         .io_if_description = NULL,
671           .dma_out_enabled = 0,
672           .dma_in_enabled = 0
673  #endif
674 @@ -1414,12 +1496,11 @@
675         {
676                 unsigned long flags;
677  
678 -               save_flags(flags);
679 -               cli();
680 +               local_irq_save(flags);
681                 *e100_modem_pins[info->line].dtr_shadow &= ~mask;
682                 *e100_modem_pins[info->line].dtr_shadow |= (set ? 0 : mask);
683                 *e100_modem_pins[info->line].dtr_port = *e100_modem_pins[info->line].dtr_shadow;
684 -               restore_flags(flags);
685 +               local_irq_restore(flags);
686         }
687  
688  #ifdef SERIAL_DEBUG_IO
689 @@ -1438,12 +1519,11 @@
690  {
691  #ifndef CONFIG_SVINTO_SIM
692         unsigned long flags;
693 -       save_flags(flags);
694 -       cli();
695 +       local_irq_save(flags);
696         info->rx_ctrl &= ~E100_RTS_MASK;
697         info->rx_ctrl |= (set ? 0 : E100_RTS_MASK);  /* RTS is active low */
698         info->port[REG_REC_CTRL] = info->rx_ctrl;
699 -       restore_flags(flags);
700 +       local_irq_restore(flags);
701  #ifdef SERIAL_DEBUG_IO
702         printk("ser%i rts %i\n", info->line, set);
703  #endif
704 @@ -1461,12 +1541,11 @@
705                 unsigned char mask = e100_modem_pins[info->line].ri_mask;
706                 unsigned long flags;
707  
708 -               save_flags(flags);
709 -               cli();
710 +               local_irq_save(flags);
711                 *e100_modem_pins[info->line].ri_shadow &= ~mask;
712                 *e100_modem_pins[info->line].ri_shadow |= (set ? 0 : mask);
713                 *e100_modem_pins[info->line].ri_port = *e100_modem_pins[info->line].ri_shadow;
714 -               restore_flags(flags);
715 +               local_irq_restore(flags);
716         }
717  #endif
718  }
719 @@ -1479,12 +1558,11 @@
720                 unsigned char mask = e100_modem_pins[info->line].cd_mask;
721                 unsigned long flags;
722  
723 -               save_flags(flags);
724 -               cli();
725 +               local_irq_save(flags);
726                 *e100_modem_pins[info->line].cd_shadow &= ~mask;
727                 *e100_modem_pins[info->line].cd_shadow |= (set ? 0 : mask);
728                 *e100_modem_pins[info->line].cd_port = *e100_modem_pins[info->line].cd_shadow;
729 -               restore_flags(flags);
730 +               local_irq_restore(flags);
731         }
732  #endif
733  }
734 @@ -1558,8 +1636,7 @@
735         /* Disable output DMA channel for the serial port in question
736          * ( set to something other then serialX)
737          */
738 -       save_flags(flags);
739 -       cli();
740 +       local_irq_save(flags);
741         DFLOW(DEBUG_LOG(info->line, "disable_txdma_channel %i\n", info->line));
742         if (info->line == 0) {
743                 if ((genconfig_shadow & IO_MASK(R_GEN_CONFIG, dma6)) ==
744 @@ -1587,7 +1664,7 @@
745                 }
746         }
747         *R_GEN_CONFIG = genconfig_shadow;
748 -       restore_flags(flags);
749 +       local_irq_restore(flags);
750  }
751  
752  
753 @@ -1595,8 +1672,7 @@
754  {
755         unsigned long flags;
756  
757 -       save_flags(flags);
758 -       cli();
759 +       local_irq_save(flags);
760         DFLOW(DEBUG_LOG(info->line, "enable_txdma_channel %i\n", info->line));
761         /* Enable output DMA channel for the serial port in question */
762         if (info->line == 0) {
763 @@ -1613,7 +1689,7 @@
764                 genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma4, serial3);
765         }
766         *R_GEN_CONFIG = genconfig_shadow;
767 -       restore_flags(flags);
768 +       local_irq_restore(flags);
769  }
770  
771  static void e100_disable_rxdma_channel(struct e100_serial *info)
772 @@ -1623,8 +1699,7 @@
773         /* Disable input DMA channel for the serial port in question
774          * ( set to something other then serialX)
775          */
776 -       save_flags(flags);
777 -       cli();
778 +       local_irq_save(flags);
779         if (info->line == 0) {
780                 if ((genconfig_shadow & IO_MASK(R_GEN_CONFIG, dma7)) ==
781                     IO_STATE(R_GEN_CONFIG, dma7, serial0)) {
782 @@ -1651,7 +1726,7 @@
783                 }
784         }
785         *R_GEN_CONFIG = genconfig_shadow;
786 -       restore_flags(flags);
787 +       local_irq_restore(flags);
788  }
789  
790  
791 @@ -1659,8 +1734,7 @@
792  {
793         unsigned long flags;
794  
795 -       save_flags(flags);
796 -       cli();
797 +       local_irq_save(flags);
798         /* Enable input DMA channel for the serial port in question */
799         if (info->line == 0) {
800                 genconfig_shadow &=  ~IO_MASK(R_GEN_CONFIG, dma7);
801 @@ -1676,7 +1750,7 @@
802                 genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma5, serial3);
803         }
804         *R_GEN_CONFIG = genconfig_shadow;
805 -       restore_flags(flags);
806 +       local_irq_restore(flags);
807  }
808  
809  #ifdef SERIAL_HANDLE_EARLY_ERRORS
810 @@ -1783,7 +1857,7 @@
811  }
812  
813  static int
814 -e100_write_rs485(struct tty_struct *tty, int from_user,
815 +e100_write_rs485(struct tty_struct *tty,
816                   const unsigned char *buf, int count)
817  {
818         struct e100_serial * info = (struct e100_serial *)tty->driver_data;
819 @@ -1796,7 +1870,7 @@
820          */
821         info->rs485.enabled = 1;
822         /* rs_write now deals with RS485 if enabled */
823 -       count = rs_write(tty, from_user, buf, count);
824 +       count = rs_write(tty, buf, count);
825         info->rs485.enabled = old_enabled;
826         return count;
827  }
828 @@ -1834,7 +1908,7 @@
829                 unsigned long flags;
830                 unsigned long xoff;
831  
832 -               save_flags(flags); cli();
833 +               local_irq_save(flags);
834                 DFLOW(DEBUG_LOG(info->line, "XOFF rs_stop xmit %i\n",
835                                 CIRC_CNT(info->xmit.head,
836                                          info->xmit.tail,SERIAL_XMIT_SIZE)));
837 @@ -1846,7 +1920,7 @@
838                 }
839  
840                 *((unsigned long *)&info->port[REG_XOFF]) = xoff;
841 -               restore_flags(flags);
842 +               local_irq_restore(flags);
843         }
844  }
845  
846 @@ -1858,7 +1932,7 @@
847                 unsigned long flags;
848                 unsigned long xoff;
849  
850 -               save_flags(flags); cli();
851 +               local_irq_save(flags);
852                 DFLOW(DEBUG_LOG(info->line, "XOFF rs_start xmit %i\n",
853                                 CIRC_CNT(info->xmit.head,
854                                          info->xmit.tail,SERIAL_XMIT_SIZE)));
855 @@ -1873,7 +1947,7 @@
856                     info->xmit.head != info->xmit.tail && info->xmit.buf)
857                         e100_enable_serial_tx_ready_irq(info);
858  
859 -               restore_flags(flags);
860 +               local_irq_restore(flags);
861         }
862  }
863  
864 @@ -2053,8 +2127,7 @@
865  static void flush_timeout_function(unsigned long data);
866  #define START_FLUSH_FAST_TIMER_TIME(info, string, usec) {\
867    unsigned long timer_flags; \
868 -  save_flags(timer_flags); \
869 -  cli(); \
870 +  local_irq_save(timer_flags); \
871    if (fast_timers[info->line].function == NULL) { \
872      serial_fast_timer_started++; \
873      TIMERD(DEBUG_LOG(info->line, "start_timer %i ", info->line)); \
874 @@ -2068,7 +2141,7 @@
875    else { \
876      TIMERD(DEBUG_LOG(info->line, "timer %i already running\n", info->line)); \
877    } \
878 -  restore_flags(timer_flags); \
879 +  local_irq_restore(timer_flags); \
880  }
881  #define START_FLUSH_FAST_TIMER(info, string) START_FLUSH_FAST_TIMER_TIME(info, string, info->flush_time_usec)
882  
883 @@ -2097,8 +2170,7 @@
884  {
885         unsigned long flags;
886  
887 -       save_flags(flags);
888 -       cli();
889 +       local_irq_save(flags);
890  
891         if (!info->first_recv_buffer)
892                 info->first_recv_buffer = buffer;
893 @@ -2111,7 +2183,7 @@
894         if (info->recv_cnt > info->max_recv_cnt)
895                 info->max_recv_cnt = info->recv_cnt;
896  
897 -       restore_flags(flags);
898 +       local_irq_restore(flags);
899  }
900  
901  static int
902 @@ -2131,11 +2203,7 @@
903                 info->icount.rx++;
904         } else {
905                 struct tty_struct *tty = info->tty;
906 -               *tty->flip.char_buf_ptr = data;
907 -               *tty->flip.flag_buf_ptr = flag;
908 -               tty->flip.flag_buf_ptr++;
909 -               tty->flip.char_buf_ptr++;
910 -               tty->flip.count++;
911 +               tty_insert_flip_char(tty, data, flag);
912                 info->icount.rx++;
913         }
914  
915 @@ -2320,7 +2388,6 @@
916          */
917         return;
918  #endif
919 -       info->tty->flip.count = 0;
920         if (info->uses_dma_in) {
921                 /* reset the input dma channel to be sure it works */
922  
923 @@ -2482,70 +2549,21 @@
924  {
925         struct tty_struct *tty;
926         struct etrax_recv_buffer *buffer;
927 -       unsigned int length;
928         unsigned long flags;
929 -       int max_flip_size;
930 -
931 -       if (!info->first_recv_buffer)
932 -               return;
933  
934 -       save_flags(flags);
935 -       cli();
936 +       local_irq_save(flags);
937 +       tty = info->tty;
938  
939 -       if (!(tty = info->tty)) {
940 -               restore_flags(flags);
941 +       if (!tty) {
942 +               local_irq_restore(flags);
943                 return;
944         }
945  
946 -       length = tty->flip.count;
947 -       /* Don't flip more than the ldisc has room for.
948 -        * The return value from ldisc.receive_room(tty) - might not be up to
949 -        * date, the previous flip of up to TTY_FLIPBUF_SIZE might be on the
950 -        * processed and not accounted for yet.
951 -        * Since we use DMA, 1 SERIAL_DESCR_BUF_SIZE could be on the way.
952 -        * Lets buffer data here and let flow control take care of it.
953 -        * Since we normally flip large chunks, the ldisc don't react
954 -        * with throttle until too late if we flip to much.
955 -        */
956 -       max_flip_size = tty->ldisc.receive_room(tty);
957 -       if (max_flip_size < 0)
958 -               max_flip_size = 0;
959 -       if (max_flip_size <= (TTY_FLIPBUF_SIZE +         /* Maybe not accounted for */
960 -                             length + info->recv_cnt +  /* We have this queued */
961 -                             2*SERIAL_DESCR_BUF_SIZE +    /* This could be on the way */
962 -                             TTY_THRESHOLD_THROTTLE)) { /* Some slack */
963 -               /* check TTY_THROTTLED first so it indicates our state */
964 -               if (!test_and_set_bit(TTY_THROTTLED, &tty->flags)) {
965 -                       DFLOW(DEBUG_LOG(info->line,"flush_to_flip throttles room %lu\n", max_flip_size));
966 -                       rs_throttle(tty);
967 -               }
968 -#if 0
969 -               else if (max_flip_size <= (TTY_FLIPBUF_SIZE +         /* Maybe not accounted for */
970 -                                          length + info->recv_cnt +  /* We have this queued */
971 -                                          SERIAL_DESCR_BUF_SIZE +    /* This could be on the way */
972 -                                          TTY_THRESHOLD_THROTTLE)) { /* Some slack */
973 -                       DFLOW(DEBUG_LOG(info->line,"flush_to_flip throttles again! %lu\n", max_flip_size));
974 -                       rs_throttle(tty);
975 -               }
976 -#endif
977 -       }
978 -
979 -       if (max_flip_size > TTY_FLIPBUF_SIZE)
980 -               max_flip_size = TTY_FLIPBUF_SIZE;
981 -
982 -       while ((buffer = info->first_recv_buffer) && length < max_flip_size) {
983 +       while ((buffer = info->first_recv_buffer)) {
984                 unsigned int count = buffer->length;
985  
986 -               if (length + count > max_flip_size)
987 -                       count = max_flip_size - length;
988 -
989 -               memcpy(tty->flip.char_buf_ptr + length, buffer->buffer, count);
990 -               memset(tty->flip.flag_buf_ptr + length, TTY_NORMAL, count);
991 -               tty->flip.flag_buf_ptr[length] = buffer->error;
992 -
993 -               length += count;
994 +               tty_insert_flip_string(tty, buffer->buffer, count);
995                 info->recv_cnt -= count;
996 -               DFLIP(DEBUG_LOG(info->line,"flip: %i\n", length));
997  
998                 if (count == buffer->length) {
999                         info->first_recv_buffer = buffer->next;
1000 @@ -2560,24 +2578,7 @@
1001         if (!info->first_recv_buffer)
1002                 info->last_recv_buffer = NULL;
1003  
1004 -       tty->flip.count = length;
1005 -       DFLIP(if (tty->ldisc.chars_in_buffer(tty) > 3500) {
1006 -               DEBUG_LOG(info->line, "ldisc %lu\n",
1007 -                         tty->ldisc.chars_in_buffer(tty));
1008 -               DEBUG_LOG(info->line, "flip.count %lu\n",
1009 -                         tty->flip.count);
1010 -             }
1011 -             );
1012 -       restore_flags(flags);
1013 -
1014 -       DFLIP(
1015 -         if (1) {
1016 -                 DEBUG_LOG(info->line, "*** rxtot %i\n", info->icount.rx);
1017 -                 DEBUG_LOG(info->line, "ldisc %lu\n", tty->ldisc.chars_in_buffer(tty));
1018 -                 DEBUG_LOG(info->line, "room  %lu\n", tty->ldisc.receive_room(tty));
1019 -         }
1020 -
1021 -       );
1022 +       local_irq_restore(flags);
1023  
1024         /* this includes a check for low-latency */
1025         tty_flip_buffer_push(tty);
1026 @@ -2722,21 +2723,7 @@
1027                 printk("!NO TTY!\n");
1028                 return info;
1029         }
1030 -       if (tty->flip.count >= TTY_FLIPBUF_SIZE - TTY_THRESHOLD_THROTTLE) {
1031 -               /* check TTY_THROTTLED first so it indicates our state */
1032 -               if (!test_and_set_bit(TTY_THROTTLED, &tty->flags)) {
1033 -                       DFLOW(DEBUG_LOG(info->line, "rs_throttle flip.count: %i\n", tty->flip.count));
1034 -                       rs_throttle(tty);
1035 -               }
1036 -       }
1037 -       if (tty->flip.count >= TTY_FLIPBUF_SIZE) {
1038 -               DEBUG_LOG(info->line, "force FLIP! %i\n", tty->flip.count);
1039 -               tty->flip.work.func((void *) tty);
1040 -               if (tty->flip.count >= TTY_FLIPBUF_SIZE) {
1041 -                       DEBUG_LOG(info->line, "FLIP FULL! %i\n", tty->flip.count);
1042 -                       return info;            /* if TTY_DONT_FLIP is set */
1043 -               }
1044 -       }
1045 +
1046         /* Read data and status at the same time */
1047         data_read = *((unsigned long *)&info->port[REG_DATA_STATUS32]);
1048  more_data:
1049 @@ -2789,27 +2776,25 @@
1050                                 DEBUG_LOG(info->line, "EBRK %i\n", info->break_detected_cnt);
1051                                 info->errorcode = ERRCODE_INSERT_BREAK;
1052                         } else {
1053 +                               unsigned char data = IO_EXTRACT(R_SERIAL0_READ, data_in, data_read);
1054 +                               char flag = TTY_NORMAL;
1055                                 if (info->errorcode == ERRCODE_INSERT_BREAK) {
1056 -                                       info->icount.brk++;
1057 -                                       *tty->flip.char_buf_ptr = 0;
1058 -                                       *tty->flip.flag_buf_ptr = TTY_BREAK;
1059 -                                       tty->flip.flag_buf_ptr++;
1060 -                                       tty->flip.char_buf_ptr++;
1061 -                                       tty->flip.count++;
1062 +                                       struct tty_struct *tty = info->tty;
1063 +                                       tty_insert_flip_char(tty, 0, flag);
1064                                         info->icount.rx++;
1065                                 }
1066 -                               *tty->flip.char_buf_ptr = IO_EXTRACT(R_SERIAL0_READ, data_in, data_read);
1067  
1068                                 if (data_read & IO_MASK(R_SERIAL0_READ, par_err)) {
1069                                         info->icount.parity++;
1070 -                                       *tty->flip.flag_buf_ptr = TTY_PARITY;
1071 +                                       flag = TTY_PARITY;
1072                                 } else if (data_read & IO_MASK(R_SERIAL0_READ, overrun)) {
1073                                         info->icount.overrun++;
1074 -                                       *tty->flip.flag_buf_ptr = TTY_OVERRUN;
1075 +                                       flag = TTY_OVERRUN;
1076                                 } else if (data_read & IO_MASK(R_SERIAL0_READ, framing_err)) {
1077                                         info->icount.frame++;
1078 -                                       *tty->flip.flag_buf_ptr = TTY_FRAME;
1079 +                                       flag = TTY_FRAME;
1080                                 }
1081 +                               tty_insert_flip_char(tty, data, flag);
1082                                 info->errorcode = 0;
1083                         }
1084                         info->break_detected_cnt = 0;
1085 @@ -2825,16 +2810,12 @@
1086                         log_int(rdpc(), 0, 0);
1087                 }
1088                 );
1089 -               *tty->flip.char_buf_ptr = IO_EXTRACT(R_SERIAL0_READ, data_in, data_read);
1090 -               *tty->flip.flag_buf_ptr = 0;
1091 +               tty_insert_flip_char(tty, IO_EXTRACT(R_SERIAL0_READ, data_in, data_read), TTY_NORMAL);
1092         } else {
1093                 DEBUG_LOG(info->line, "ser_rx int but no data_avail  %08lX\n", data_read);
1094         }
1095  
1096  
1097 -       tty->flip.flag_buf_ptr++;
1098 -       tty->flip.char_buf_ptr++;
1099 -       tty->flip.count++;
1100         info->icount.rx++;
1101         data_read = *((unsigned long *)&info->port[REG_DATA_STATUS32]);
1102         if (data_read & IO_MASK(R_SERIAL0_READ, data_avail)) {
1103 @@ -2972,7 +2953,7 @@
1104         if (info->x_char) {
1105                 unsigned char rstat;
1106                 DFLOW(DEBUG_LOG(info->line, "tx_int: xchar 0x%02X\n", info->x_char));
1107 -               save_flags(flags); cli();
1108 +               local_irq_save(flags);
1109                 rstat = info->port[REG_STATUS];
1110                 DFLOW(DEBUG_LOG(info->line, "stat %x\n", rstat));
1111  
1112 @@ -2981,7 +2962,7 @@
1113                 info->x_char = 0;
1114                 /* We must enable since it is disabled in ser_interrupt */
1115                 e100_enable_serial_tx_ready_irq(info);
1116 -               restore_flags(flags);
1117 +               local_irq_restore(flags);
1118                 return;
1119         }
1120         if (info->uses_dma_out) {
1121 @@ -2989,7 +2970,7 @@
1122                 int i;
1123                 /* We only use normal tx interrupt when sending x_char */
1124                 DFLOW(DEBUG_LOG(info->line, "tx_int: xchar sent\n", 0));
1125 -               save_flags(flags); cli();
1126 +               local_irq_save(flags);
1127                 rstat = info->port[REG_STATUS];
1128                 DFLOW(DEBUG_LOG(info->line, "stat %x\n", rstat));
1129                 e100_disable_serial_tx_ready_irq(info);
1130 @@ -3002,7 +2983,7 @@
1131                         nop();
1132  
1133                 *info->ocmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, continue);
1134 -               restore_flags(flags);
1135 +               local_irq_restore(flags);
1136                 return;
1137         }
1138         /* Normal char-by-char interrupt */
1139 @@ -3016,7 +2997,7 @@
1140         }
1141         DINTR2(DEBUG_LOG(info->line, "tx_int %c\n", info->xmit.buf[info->xmit.tail]));
1142         /* Send a byte, rs485 timing is critical so turn of ints */
1143 -       save_flags(flags); cli();
1144 +       local_irq_save(flags);
1145         info->port[REG_TR_DATA] = info->xmit.buf[info->xmit.tail];
1146         info->xmit.tail = (info->xmit.tail + 1) & (SERIAL_XMIT_SIZE-1);
1147         info->icount.tx++;
1148 @@ -3040,7 +3021,7 @@
1149                 /* We must enable since it is disabled in ser_interrupt */
1150                 e100_enable_serial_tx_ready_irq(info);
1151         }
1152 -       restore_flags(flags);
1153 +       local_irq_restore(flags);
1154  
1155         if (CIRC_CNT(info->xmit.head,
1156                      info->xmit.tail,
1157 @@ -3065,7 +3046,7 @@
1158         int handled = 0;
1159         static volatile unsigned long reentered_ready_mask = 0;
1160  
1161 -       save_flags(flags); cli();
1162 +       local_irq_save(flags);
1163         irq_mask1_rd = *R_IRQ_MASK1_RD;
1164         /* First handle all rx interrupts with ints disabled */
1165         info = rs_table;
1166 @@ -3110,7 +3091,7 @@
1167                         /* Unblock the serial interrupt */
1168                         *R_VECT_MASK_SET = IO_STATE(R_VECT_MASK_SET, serial, set);
1169  
1170 -                       sti();
1171 +                       local_irq_enable();
1172                         ready_mask = (1 << (8+1+2*0)); /* ser0 tr_ready */
1173                         info = rs_table;
1174                         for (i = 0; i < NR_PORTS; i++) {
1175 @@ -3123,11 +3104,11 @@
1176                                 ready_mask <<= 2;
1177                         }
1178                         /* handle_ser_tx_interrupt enables tr_ready interrupts */
1179 -                       cli();
1180 +                       local_irq_disable();
1181                         /* Handle reentered TX interrupt */
1182                         irq_mask1_rd = reentered_ready_mask;
1183                 }
1184 -               cli();
1185 +               local_irq_disable();
1186                 tx_started = 0;
1187         } else {
1188                 unsigned long ready_mask;
1189 @@ -3143,7 +3124,7 @@
1190                 }
1191         }
1192  
1193 -       restore_flags(flags);
1194 +       local_irq_restore(flags);
1195         return IRQ_RETVAL(handled);
1196  } /* ser_interrupt */
1197  #endif
1198 @@ -3192,13 +3173,12 @@
1199         if (!xmit_page)
1200                 return -ENOMEM;
1201  
1202 -       save_flags(flags);
1203 -       cli();
1204 +       local_irq_save(flags);
1205  
1206         /* if it was already initialized, skip this */
1207  
1208         if (info->flags & ASYNC_INITIALIZED) {
1209 -               restore_flags(flags);
1210 +               local_irq_restore(flags);
1211                 free_page(xmit_page);
1212                 return 0;
1213         }
1214 @@ -3324,7 +3304,7 @@
1215  
1216         info->flags |= ASYNC_INITIALIZED;
1217  
1218 -       restore_flags(flags);
1219 +       local_irq_restore(flags);
1220         return 0;
1221  }
1222  
1223 @@ -3375,8 +3355,7 @@
1224                info->irq);
1225  #endif
1226  
1227 -       save_flags(flags);
1228 -       cli(); /* Disable interrupts */
1229 +       local_irq_save(flags);
1230  
1231         if (info->xmit.buf) {
1232                 free_page((unsigned long)info->xmit.buf);
1233 @@ -3400,7 +3379,7 @@
1234                 set_bit(TTY_IO_ERROR, &info->tty->flags);
1235  
1236         info->flags &= ~ASYNC_INITIALIZED;
1237 -       restore_flags(flags);
1238 +       local_irq_restore(flags);
1239  }
1240  
1241  
1242 @@ -3492,8 +3471,7 @@
1243  
1244  #ifndef CONFIG_SVINTO_SIM
1245         /* start with default settings and then fill in changes */
1246 -       save_flags(flags);
1247 -       cli();
1248 +       local_irq_save(flags);
1249         /* 8 bit, no/even parity */
1250         info->rx_ctrl &= ~(IO_MASK(R_SERIAL0_REC_CTRL, rec_bitnr) |
1251                            IO_MASK(R_SERIAL0_REC_CTRL, rec_par_en) |
1252 @@ -3557,7 +3535,7 @@
1253         }
1254  
1255         *((unsigned long *)&info->port[REG_XOFF]) = xoff;
1256 -       restore_flags(flags);
1257 +       local_irq_restore(flags);
1258  #endif /* !CONFIG_SVINTO_SIM */
1259  
1260         update_char_time(info);
1261 @@ -3585,13 +3563,12 @@
1262  
1263         /* this protection might not exactly be necessary here */
1264  
1265 -       save_flags(flags);
1266 -       cli();
1267 +       local_irq_save(flags);
1268         start_transmit(info);
1269 -       restore_flags(flags);
1270 +       local_irq_restore(flags);
1271  }
1272  
1273 -static int rs_raw_write(struct tty_struct * tty, int from_user,
1274 +static int rs_raw_write(struct tty_struct * tty,
1275                         const unsigned char *buf, int count)
1276  {
1277         int     c, ret = 0;
1278 @@ -3614,72 +3591,37 @@
1279         SIMCOUT(buf, count);
1280         return count;
1281  #endif
1282 -       save_flags(flags);
1283 +       local_save_flags(flags);
1284         DFLOW(DEBUG_LOG(info->line, "write count %i ", count));
1285         DFLOW(DEBUG_LOG(info->line, "ldisc %i\n", tty->ldisc.chars_in_buffer(tty)));
1286  
1287  
1288 -       /* the cli/restore_flags pairs below are needed because the
1289 +       /* the local_irq_disable/restore_flags pairs below are needed because the
1290          * DMA interrupt handler moves the info->xmit values. the memcpy
1291          * needs to be in the critical region unfortunately, because we
1292          * need to read xmit values, memcpy, write xmit values in one
1293          * atomic operation... this could perhaps be avoided by more clever
1294          * design.
1295          */
1296 -       if (from_user) {
1297 -               mutex_lock(&tmp_buf_mutex);
1298 -               while (1) {
1299 -                       int c1;
1300 -                       c = CIRC_SPACE_TO_END(info->xmit.head,
1301 -                                             info->xmit.tail,
1302 -                                             SERIAL_XMIT_SIZE);
1303 -                       if (count < c)
1304 -                               c = count;
1305 -                       if (c <= 0)
1306 -                               break;
1307 -
1308 -                       c -= copy_from_user(tmp_buf, buf, c);
1309 -                       if (!c) {
1310 -                               if (!ret)
1311 -                                       ret = -EFAULT;
1312 -                               break;
1313 -                       }
1314 -                       cli();
1315 -                       c1 = CIRC_SPACE_TO_END(info->xmit.head,
1316 -                                              info->xmit.tail,
1317 -                                              SERIAL_XMIT_SIZE);
1318 -                       if (c1 < c)
1319 -                               c = c1;
1320 -                       memcpy(info->xmit.buf + info->xmit.head, tmp_buf, c);
1321 -                       info->xmit.head = ((info->xmit.head + c) &
1322 -                                          (SERIAL_XMIT_SIZE-1));
1323 -                       restore_flags(flags);
1324 -                       buf += c;
1325 -                       count -= c;
1326 -                       ret += c;
1327 -               }
1328 -               mutex_unlock(&tmp_buf_mutex);
1329 -       } else {
1330 -               cli();
1331 -               while (count) {
1332 -                       c = CIRC_SPACE_TO_END(info->xmit.head,
1333 -                                             info->xmit.tail,
1334 -                                             SERIAL_XMIT_SIZE);
1335 -
1336 -                       if (count < c)
1337 -                               c = count;
1338 -                       if (c <= 0)
1339 -                               break;
1340 -
1341 -                       memcpy(info->xmit.buf + info->xmit.head, buf, c);
1342 -                       info->xmit.head = (info->xmit.head + c) &
1343 -                               (SERIAL_XMIT_SIZE-1);
1344 -                       buf += c;
1345 -                       count -= c;
1346 -                       ret += c;
1347 -               }
1348 -               restore_flags(flags);
1349 +       local_irq_disable();
1350 +       while (count) {
1351 +               c = CIRC_SPACE_TO_END(info->xmit.head,
1352 +                                     info->xmit.tail,
1353 +                                     SERIAL_XMIT_SIZE);
1354 +
1355 +               if (count < c)
1356 +                       c = count;
1357 +               if (c <= 0)
1358 +                       break;
1359 +
1360 +               memcpy(info->xmit.buf + info->xmit.head, buf, c);
1361 +               info->xmit.head = (info->xmit.head + c) &
1362 +                       (SERIAL_XMIT_SIZE-1);
1363 +               buf += c;
1364 +               count -= c;
1365 +               ret += c;
1366         }
1367 +       local_irq_restore(flags);
1368  
1369         /* enable transmitter if not running, unless the tty is stopped
1370          * this does not need IRQ protection since if tr_running == 0
1371 @@ -3698,7 +3640,7 @@
1372  } /* raw_raw_write() */
1373  
1374  static int
1375 -rs_write(struct tty_struct * tty, int from_user,
1376 +rs_write(struct tty_struct * tty,
1377          const unsigned char *buf, int count)
1378  {
1379  #if defined(CONFIG_ETRAX_RS485)
1380 @@ -3725,7 +3667,7 @@
1381         }
1382  #endif /* CONFIG_ETRAX_RS485 */
1383  
1384 -       count = rs_raw_write(tty, from_user, buf, count);
1385 +       count = rs_raw_write(tty, buf, count);
1386  
1387  #if defined(CONFIG_ETRAX_RS485)
1388         if (info->rs485.enabled)
1389 @@ -3793,10 +3735,9 @@
1390         struct e100_serial *info = (struct e100_serial *)tty->driver_data;
1391         unsigned long flags;
1392  
1393 -       save_flags(flags);
1394 -       cli();
1395 +       local_irq_save(flags);
1396         info->xmit.head = info->xmit.tail = 0;
1397 -       restore_flags(flags);
1398 +       local_irq_restore(flags);
1399  
1400         wake_up_interruptible(&tty->write_wait);
1401  
1402 @@ -3818,7 +3759,7 @@
1403  {
1404         struct e100_serial *info = (struct e100_serial *)tty->driver_data;
1405         unsigned long flags;
1406 -       save_flags(flags); cli();
1407 +       local_irq_save(flags);
1408         if (info->uses_dma_out) {
1409                 /* Put the DMA on hold and disable the channel */
1410                 *info->ocmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, hold);
1411 @@ -3835,7 +3776,7 @@
1412         DFLOW(DEBUG_LOG(info->line, "rs_send_xchar 0x%02X\n", ch));
1413         info->x_char = ch;
1414         e100_enable_serial_tx_ready_irq(info);
1415 -       restore_flags(flags);
1416 +       local_irq_restore(flags);
1417  }
1418  
1419  /*
1420 @@ -4085,61 +4026,6 @@
1421         return 0;
1422  }
1423  
1424 -
1425 -static int
1426 -set_modem_info(struct e100_serial * info, unsigned int cmd,
1427 -              unsigned int *value)
1428 -{
1429 -       unsigned int arg;
1430 -
1431 -       if (copy_from_user(&arg, value, sizeof(int)))
1432 -               return -EFAULT;
1433 -
1434 -       switch (cmd) {
1435 -       case TIOCMBIS:
1436 -               if (arg & TIOCM_RTS) {
1437 -                       e100_rts(info, 1);
1438 -               }
1439 -               if (arg & TIOCM_DTR) {
1440 -                       e100_dtr(info, 1);
1441 -               }
1442 -               /* Handle FEMALE behaviour */
1443 -               if (arg & TIOCM_RI) {
1444 -                       e100_ri_out(info, 1);
1445 -               }
1446 -               if (arg & TIOCM_CD) {
1447 -                       e100_cd_out(info, 1);
1448 -               }
1449 -               break;
1450 -       case TIOCMBIC:
1451 -               if (arg & TIOCM_RTS) {
1452 -                       e100_rts(info, 0);
1453 -               }
1454 -               if (arg & TIOCM_DTR) {
1455 -                       e100_dtr(info, 0);
1456 -               }
1457 -               /* Handle FEMALE behaviour */
1458 -               if (arg & TIOCM_RI) {
1459 -                       e100_ri_out(info, 0);
1460 -               }
1461 -               if (arg & TIOCM_CD) {
1462 -                       e100_cd_out(info, 0);
1463 -               }
1464 -               break;
1465 -       case TIOCMSET:
1466 -               e100_rts(info, arg & TIOCM_RTS);
1467 -               e100_dtr(info, arg & TIOCM_DTR);
1468 -               /* Handle FEMALE behaviour */
1469 -               e100_ri_out(info, arg & TIOCM_RI);
1470 -               e100_cd_out(info, arg & TIOCM_CD);
1471 -               break;
1472 -       default:
1473 -               return -EINVAL;
1474 -       }
1475 -       return 0;
1476 -}
1477 -
1478 -
1479  static void
1480  rs_break(struct tty_struct *tty, int break_state)
1481  {
1482 @@ -4149,8 +4035,7 @@
1483         if (!info->port)
1484                 return;
1485  
1486 -       save_flags(flags);
1487 -       cli();
1488 +       local_irq_save(flags);
1489         if (break_state == -1) {
1490                 /* Go to manual mode and set the txd pin to 0 */
1491                 info->tx_ctrl &= 0x3F; /* Clear bit 7 (txd) and 6 (tr_enable) */
1492 @@ -4158,7 +4043,42 @@
1493                 info->tx_ctrl |= (0x80 | 0x40); /* Set bit 7 (txd) and 6 (tr_enable) */
1494         }
1495         info->port[REG_TR_CTRL] = info->tx_ctrl;
1496 -       restore_flags(flags);
1497 +       local_irq_restore(flags);
1498 +}
1499 +
1500 +static int
1501 +rs_tiocmset(struct tty_struct *tty, struct file * file, unsigned int set, unsigned int clear)
1502 +{
1503 +       struct e100_serial * info = (struct e100_serial *)tty->driver_data;
1504 +  
1505 +       if (clear & TIOCM_RTS) {
1506 +               e100_rts(info, 0);
1507 +       }
1508 +       if (clear & TIOCM_DTR) {
1509 +               e100_dtr(info, 0);
1510 +       }
1511 +       /* Handle FEMALE behaviour */
1512 +       if (clear & TIOCM_RI) {
1513 +               e100_ri_out(info, 0);
1514 +       }
1515 +       if (clear & TIOCM_CD) {
1516 +               e100_cd_out(info, 0);
1517 +       }
1518 +
1519 +       if (set & TIOCM_RTS) {
1520 +               e100_rts(info, 1);
1521 +       }
1522 +       if (set & TIOCM_DTR) {
1523 +               e100_dtr(info, 1);
1524 +       }
1525 +       /* Handle FEMALE behaviour */
1526 +       if (set & TIOCM_RI) {
1527 +               e100_ri_out(info, 1);
1528 +       }
1529 +       if (set & TIOCM_CD) {
1530 +               e100_cd_out(info, 1);
1531 +       }
1532 +       return 0;
1533  }
1534  
1535  static int
1536 @@ -4177,10 +4097,6 @@
1537         switch (cmd) {
1538                 case TIOCMGET:
1539                         return get_modem_info(info, (unsigned int *) arg);
1540 -               case TIOCMBIS:
1541 -               case TIOCMBIC:
1542 -               case TIOCMSET:
1543 -                       return set_modem_info(info, cmd, (unsigned int *) arg);
1544                 case TIOCGSERIAL:
1545                         return get_serial_info(info,
1546                                                (struct serial_struct *) arg);
1547 @@ -4212,7 +4128,7 @@
1548                         if (copy_from_user(&rs485wr, (struct rs485_write*)arg, sizeof(rs485wr)))
1549                                 return -EFAULT;
1550  
1551 -                       return e100_write_rs485(tty, 1, rs485wr.outc, rs485wr.outc_size);
1552 +                       return e100_write_rs485(tty, rs485wr.outc, rs485wr.outc_size);
1553                 }
1554  #endif
1555  
1556 @@ -4242,46 +4158,6 @@
1557  
1558  }
1559  
1560 -/* In debugport.c - register a console write function that uses the normal
1561 - * serial driver
1562 - */
1563 -typedef int (*debugport_write_function)(int i, const char *buf, unsigned int len);
1564 -
1565 -extern debugport_write_function debug_write_function;
1566 -
1567 -static int rs_debug_write_function(int i, const char *buf, unsigned int len)
1568 -{
1569 -       int cnt;
1570 -       int written = 0;
1571 -        struct tty_struct *tty;
1572 -        static int recurse_cnt = 0;
1573 -
1574 -        tty = rs_table[i].tty;
1575 -        if (tty)  {
1576 -               unsigned long flags;
1577 -               if (recurse_cnt > 5) /* We skip this debug output */
1578 -                       return 1;
1579 -
1580 -               local_irq_save(flags);
1581 -               recurse_cnt++;
1582 -               local_irq_restore(flags);
1583 -                do {
1584 -                        cnt = rs_write(tty, 0, buf + written, len);
1585 -                        if (cnt >= 0) {
1586 -                               written += cnt;
1587 -                                buf += cnt;
1588 -                                len -= cnt;
1589 -                        } else
1590 -                                len = cnt;
1591 -                } while(len > 0);
1592 -               local_irq_save(flags);
1593 -               recurse_cnt--;
1594 -               local_irq_restore(flags);
1595 -                return 1;
1596 -        }
1597 -        return 0;
1598 -}
1599 -
1600  /*
1601   * ------------------------------------------------------------
1602   * rs_close()
1603 @@ -4303,11 +4179,10 @@
1604  
1605         /* interrupts are disabled for this entire function */
1606  
1607 -       save_flags(flags);
1608 -       cli();
1609 +       local_irq_save(flags);
1610  
1611         if (tty_hung_up_p(filp)) {
1612 -               restore_flags(flags);
1613 +               local_irq_restore(flags);
1614                 return;
1615         }
1616  
1617 @@ -4334,7 +4209,7 @@
1618                 info->count = 0;
1619         }
1620         if (info->count) {
1621 -               restore_flags(flags);
1622 +               local_irq_restore(flags);
1623                 return;
1624         }
1625         info->flags |= ASYNC_CLOSING;
1626 @@ -4388,7 +4263,7 @@
1627         }
1628         info->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING);
1629         wake_up_interruptible(&info->close_wait);
1630 -       restore_flags(flags);
1631 +       local_irq_restore(flags);
1632  
1633         /* port closed */
1634  
1635 @@ -4410,6 +4285,28 @@
1636  #endif
1637         }
1638  #endif
1639 +
1640 +       /*
1641 +        * Release any allocated DMA irq's.
1642 +        */
1643 +       if (info->dma_in_enabled) {
1644 +               cris_free_dma(info->dma_in_nbr, info->dma_in_irq_description);
1645 +               free_irq(info->dma_in_irq_nbr,
1646 +                        info);
1647 +               info->uses_dma_in = 0;
1648 +#ifdef SERIAL_DEBUG_OPEN
1649 +               printk("DMA irq '%s' freed\n", info->dma_in_irq_description);
1650 +#endif
1651 +       }
1652 +       if (info->dma_out_enabled) {
1653 +               free_irq(info->dma_out_irq_nbr,
1654 +                        info);
1655 +               cris_free_dma(info->dma_out_nbr, info->dma_out_irq_description);
1656 +               info->uses_dma_out = 0;
1657 +#ifdef SERIAL_DEBUG_OPEN
1658 +               printk("DMA irq '%s' freed\n", info->dma_out_irq_description);
1659 +#endif
1660 +       }
1661  }
1662  
1663  /*
1664 @@ -4485,7 +4382,7 @@
1665         if (tty_hung_up_p(filp) ||
1666             (info->flags & ASYNC_CLOSING)) {
1667                 if (info->flags & ASYNC_CLOSING)
1668 -                       interruptible_sleep_on(&info->close_wait);
1669 +                       wait_event_interruptible(info->close_wait, 0);
1670  #ifdef SERIAL_DO_RESTART
1671                 if (info->flags & ASYNC_HUP_NOTIFY)
1672                         return -EAGAIN;
1673 @@ -4523,21 +4420,19 @@
1674         printk("block_til_ready before block: ttyS%d, count = %d\n",
1675                info->line, info->count);
1676  #endif
1677 -       save_flags(flags);
1678 -       cli();
1679 +       local_irq_save(flags);
1680         if (!tty_hung_up_p(filp)) {
1681                 extra_count++;
1682                 info->count--;
1683         }
1684 -       restore_flags(flags);
1685 +       local_irq_restore(flags);
1686         info->blocked_open++;
1687         while (1) {
1688 -               save_flags(flags);
1689 -               cli();
1690 +               local_irq_save(flags);
1691                 /* assert RTS and DTR */
1692                 e100_rts(info, 1);
1693                 e100_dtr(info, 1);
1694 -               restore_flags(flags);
1695 +               local_irq_restore(flags);
1696                 set_current_state(TASK_INTERRUPTIBLE);
1697                 if (tty_hung_up_p(filp) ||
1698                     !(info->flags & ASYNC_INITIALIZED)) {
1699 @@ -4589,9 +4484,9 @@
1700         struct e100_serial      *info;
1701         int                     retval, line;
1702         unsigned long           page;
1703 +       int                     allocated_resources = 0;
1704  
1705         /* find which port we want to open */
1706 -
1707         line = tty->index;
1708  
1709         if (line < 0 || line >= NR_PORTS)
1710 @@ -4632,7 +4527,7 @@
1711         if (tty_hung_up_p(filp) ||
1712             (info->flags & ASYNC_CLOSING)) {
1713                 if (info->flags & ASYNC_CLOSING)
1714 -                       interruptible_sleep_on(&info->close_wait);
1715 +                       wait_event_interruptible(info->close_wait, 0);
1716  #ifdef SERIAL_DO_RESTART
1717                 return ((info->flags & ASYNC_HUP_NOTIFY) ?
1718                         -EAGAIN : -ERESTARTSYS);
1719 @@ -4642,12 +4537,79 @@
1720         }
1721  
1722         /*
1723 +        * If DMA is enabled try to allocate the irq's.
1724 +        */
1725 +       if (info->count == 1) {
1726 +               allocated_resources = 1;
1727 +               if (info->dma_in_enabled) {
1728 +                       if (request_irq(info->dma_in_irq_nbr,
1729 +                                       rec_interrupt,
1730 +                                       info->dma_in_irq_flags,
1731 +                                       info->dma_in_irq_description,
1732 +                                       info)) {
1733 +                               printk(KERN_WARNING "DMA irq '%s' busy; falling back to non-DMA mode\n", info->dma_in_irq_description);
1734 +                               /* Make sure we never try to use DMA in for the port again. */
1735 +                               info->dma_in_enabled = 0;
1736 +                       } else if (cris_request_dma(info->dma_in_nbr,
1737 +                                                   info->dma_in_irq_description,
1738 +                                                   DMA_VERBOSE_ON_ERROR,
1739 +                                                   info->dma_owner)) {
1740 +                               free_irq(info->dma_in_irq_nbr, info);
1741 +                               printk(KERN_WARNING "DMA '%s' busy; falling back to non-DMA mode\n", info->dma_in_irq_description);
1742 +                               /* Make sure we never try to use DMA in for the port again. */
1743 +                               info->dma_in_enabled = 0;
1744 +                       }
1745 +#ifdef SERIAL_DEBUG_OPEN
1746 +                       else printk("DMA irq '%s' allocated\n", info->dma_in_irq_description);
1747 +#endif
1748 +               }
1749 +               if (info->dma_out_enabled) {
1750 +                       if (request_irq(info->dma_out_irq_nbr,
1751 +                                              tr_interrupt,
1752 +                                              info->dma_out_irq_flags,
1753 +                                              info->dma_out_irq_description,
1754 +                                              info)) {
1755 +                               printk(KERN_WARNING "DMA irq '%s' busy; falling back to non-DMA mode\n", info->dma_out_irq_description);
1756 +                               /* Make sure we never try to use DMA out for the port again. */
1757 +                               info->dma_out_enabled = 0;
1758 +                       } else if (cris_request_dma(info->dma_out_nbr,
1759 +                                            info->dma_out_irq_description,
1760 +                                            DMA_VERBOSE_ON_ERROR,
1761 +                                            info->dma_owner)) {
1762 +                               free_irq(info->dma_out_irq_nbr, info);
1763 +                               printk(KERN_WARNING "DMA '%s' busy; falling back to non-DMA mode\n", info->dma_out_irq_description);
1764 +                               /* Make sure we never try to use DMA out for the port again. */
1765 +                               info->dma_out_enabled = 0;
1766 +                       }
1767 +#ifdef SERIAL_DEBUG_OPEN
1768 +                       else printk("DMA irq '%s' allocated\n", info->dma_out_irq_description);
1769 +#endif
1770 +               }
1771 +       }
1772 +
1773 +       /*
1774          * Start up the serial port
1775          */
1776  
1777         retval = startup(info);
1778 -       if (retval)
1779 -               return retval;
1780 +       if (retval) {
1781 +               if (allocated_resources) {
1782 +                       if (info->dma_out_enabled) {
1783 +                               cris_free_dma(info->dma_out_nbr, info->dma_out_irq_description);
1784 +                               free_irq(info->dma_out_irq_nbr,
1785 +                                        info);
1786 +                       }
1787 +                       if (info->dma_in_enabled) {
1788 +                               cris_free_dma(info->dma_in_nbr, info->dma_in_irq_description);
1789 +                               free_irq(info->dma_in_irq_nbr,
1790 +                                        info);
1791 +                       }
1792 +               }
1793 +               /* FIXME Decrease count info->count here too? */
1794 +               return retval;
1795 +
1796 +       }
1797 +
1798  
1799         retval = block_til_ready(tty, filp, info);
1800         if (retval) {
1801 @@ -4655,6 +4617,19 @@
1802                 printk("rs_open returning after block_til_ready with %d\n",
1803                        retval);
1804  #endif
1805 +               if (allocated_resources) {
1806 +                       if (info->dma_out_enabled) {
1807 +                               cris_free_dma(info->dma_out_nbr, info->dma_out_irq_description);
1808 +                               free_irq(info->dma_out_irq_nbr,
1809 +                                        info);
1810 +                       }
1811 +                       if (info->dma_in_enabled) {
1812 +                               cris_free_dma(info->dma_in_nbr, info->dma_in_irq_description);
1813 +                               free_irq(info->dma_in_irq_nbr,
1814 +                                        info);
1815 +                       }
1816 +               }
1817 +
1818                 return retval;
1819         }
1820  
1821 @@ -4844,6 +4819,7 @@
1822         .send_xchar = rs_send_xchar,
1823         .wait_until_sent = rs_wait_until_sent,
1824         .read_proc = rs_read_proc,
1825 +       .tiocmset = rs_tiocmset
1826  };
1827  
1828  static int __init
1829 @@ -4863,7 +4839,22 @@
1830  #if !defined(CONFIG_ETRAX_SERIAL_FAST_TIMER)
1831         init_timer(&flush_timer);
1832         flush_timer.function = timed_flush_handler;
1833 -       mod_timer(&flush_timer, jiffies + CONFIG_ETRAX_SERIAL_RX_TIMEOUT_TICKS);
1834 +       mod_timer(&flush_timer, jiffies + 5);
1835 +#endif
1836 +
1837 +#if defined(CONFIG_ETRAX_RS485)
1838 +#if defined(CONFIG_ETRAX_RS485_ON_PA)
1839 +       if (cris_io_interface_allocate_pins(if_ser0, 'a', rs485_pa_bit, rs485_pa_bit)) {
1840 +               printk(KERN_CRIT "ETRAX100LX serial: Could not allocate RS485 pin\n");
1841 +               return -EBUSY;
1842 +       }
1843 +#endif
1844 +#if defined(CONFIG_ETRAX_RS485_ON_PORT_G)
1845 +       if (cris_io_interface_allocate_pins(if_ser0, 'g', rs485_pa_bit, rs485_port_g_bit)) {
1846 +               printk(KERN_CRIT "ETRAX100LX serial: Could not allocate RS485 pin\n");
1847 +               return -EBUSY;
1848 +       }
1849 +#endif
1850  #endif
1851  
1852         /* Initialize the tty_driver structure */
1853 @@ -4888,6 +4879,14 @@
1854         /* do some initializing for the separate ports */
1855  
1856         for (i = 0, info = rs_table; i < NR_PORTS; i++,info++) {
1857 +               if (info->enabled) {
1858 +                       if (cris_request_io_interface(info->io_if, info->io_if_description)) {
1859 +                               printk(KERN_CRIT "ETRAX100LX async serial: Could not allocate IO pins for %s, port %d\n",
1860 +                                      info->io_if_description,
1861 +                                      i);
1862 +                               info->enabled = 0;
1863 +                       }
1864 +               }
1865                 info->uses_dma_in = 0;
1866                 info->uses_dma_out = 0;
1867                 info->line = i;
1868 @@ -4939,64 +4938,16 @@
1869  #endif
1870  
1871  #ifndef CONFIG_SVINTO_SIM
1872 +#ifndef CONFIG_ETRAX_KGDB
1873         /* Not needed in simulator.  May only complicate stuff. */
1874         /* hook the irq's for DMA channel 6 and 7, serial output and input, and some more... */
1875  
1876 -       if (request_irq(SERIAL_IRQ_NBR, ser_interrupt, IRQF_SHARED | IRQF_DISABLED, "serial ", NULL))
1877 -               panic("irq8");
1878 -
1879 -#ifdef CONFIG_ETRAX_SERIAL_PORT0
1880 -#ifdef CONFIG_ETRAX_SERIAL_PORT0_DMA6_OUT
1881 -       if (request_irq(SER0_DMA_TX_IRQ_NBR, tr_interrupt, IRQF_DISABLED, "serial 0 dma tr", NULL))
1882 -               panic("irq22");
1883 -#endif
1884 -#ifdef CONFIG_ETRAX_SERIAL_PORT0_DMA7_IN
1885 -       if (request_irq(SER0_DMA_RX_IRQ_NBR, rec_interrupt, IRQF_DISABLED, "serial 0 dma rec", NULL))
1886 -               panic("irq23");
1887 -#endif
1888 -#endif
1889 -
1890 -#ifdef CONFIG_ETRAX_SERIAL_PORT1
1891 -#ifdef CONFIG_ETRAX_SERIAL_PORT1_DMA8_OUT
1892 -       if (request_irq(SER1_DMA_TX_IRQ_NBR, tr_interrupt, IRQF_DISABLED, "serial 1 dma tr", NULL))
1893 -               panic("irq24");
1894 -#endif
1895 -#ifdef CONFIG_ETRAX_SERIAL_PORT1_DMA9_IN
1896 -       if (request_irq(SER1_DMA_RX_IRQ_NBR, rec_interrupt, IRQF_DISABLED, "serial 1 dma rec", NULL))
1897 -               panic("irq25");
1898 -#endif
1899 -#endif
1900 -#ifdef CONFIG_ETRAX_SERIAL_PORT2
1901 -       /* DMA Shared with par0 (and SCSI0 and ATA) */
1902 -#ifdef CONFIG_ETRAX_SERIAL_PORT2_DMA2_OUT
1903 -       if (request_irq(SER2_DMA_TX_IRQ_NBR, tr_interrupt, IRQF_SHARED | IRQF_DISABLED, "serial 2 dma tr", NULL))
1904 -               panic("irq18");
1905 -#endif
1906 -#ifdef CONFIG_ETRAX_SERIAL_PORT2_DMA3_IN
1907 -       if (request_irq(SER2_DMA_RX_IRQ_NBR, rec_interrupt, IRQF_SHARED | IRQF_DISABLED, "serial 2 dma rec", NULL))
1908 -               panic("irq19");
1909 -#endif
1910 -#endif
1911 -#ifdef CONFIG_ETRAX_SERIAL_PORT3
1912 -       /* DMA Shared with par1 (and SCSI1 and Extern DMA 0) */
1913 -#ifdef CONFIG_ETRAX_SERIAL_PORT3_DMA4_OUT
1914 -       if (request_irq(SER3_DMA_TX_IRQ_NBR, tr_interrupt, IRQF_SHARED | IRQF_DISABLED, "serial 3 dma tr", NULL))
1915 -               panic("irq20");
1916 -#endif
1917 -#ifdef CONFIG_ETRAX_SERIAL_PORT3_DMA5_IN
1918 -       if (request_irq(SER3_DMA_RX_IRQ_NBR, rec_interrupt, IRQF_SHARED | IRQF_DISABLED, "serial 3 dma rec", NULL))
1919 -               panic("irq21");
1920 -#endif
1921 -#endif
1922 +       if (request_irq(SERIAL_IRQ_NBR, ser_interrupt, IRQF_SHARED | IRQF_DISABLED, "serial ", driver))
1923 +               panic("%s: Failed to request irq8", __FUNCTION__);
1924  
1925 -#ifdef CONFIG_ETRAX_SERIAL_FLUSH_DMA_FAST
1926 -       if (request_irq(TIMER1_IRQ_NBR, timeout_interrupt, IRQF_SHARED | IRQF_DISABLED,
1927 -                      "fast serial dma timeout", NULL)) {
1928 -               printk(KERN_CRIT "err: timer1 irq\n");
1929 -       }
1930  #endif
1931  #endif /* CONFIG_SVINTO_SIM */
1932 -       debug_write_function = rs_debug_write_function;
1933 +
1934         return 0;
1935  }
1936  
1937 --- linux-2.6.19.2.orig/drivers/serial/crisv10.h        2007-01-10 20:10:37.000000000 +0100
1938 +++ linux-2.6.19.2.dev/drivers/serial/crisv10.h 2006-10-13 14:44:38.000000000 +0200
1939 @@ -9,6 +9,8 @@
1940  
1941  #include <linux/circ_buf.h>
1942  #include <asm/termios.h>
1943 +#include <asm/dma.h>
1944 +#include <asm/arch/io_interface_mux.h>
1945  
1946  /* Software state per channel */
1947  
1948 @@ -61,6 +63,19 @@
1949         u8              dma_in_enabled:1;  /* Set to 1 if DMA should be used */
1950  
1951         /* end of fields defined in rs_table[] in .c-file */
1952 +       int             dma_owner;
1953 +       unsigned int    dma_in_nbr;
1954 +       unsigned int    dma_out_nbr;
1955 +       unsigned int    dma_in_irq_nbr;
1956 +       unsigned int    dma_out_irq_nbr;
1957 +       unsigned long   dma_in_irq_flags;
1958 +       unsigned long   dma_out_irq_flags;
1959 +       char            *dma_in_irq_description;
1960 +       char            *dma_out_irq_description;
1961 +
1962 +       enum cris_io_interface io_if;
1963 +       char            *io_if_description;
1964 +
1965         u8              uses_dma_in;  /* Set to 1 if DMA is used */
1966         u8              uses_dma_out; /* Set to 1 if DMA is used */
1967         u8              forced_eop;   /* a fifo eop has been forced */
1968 --- linux-2.6.19.2.orig/drivers/serial/crisv32.c        1970-01-01 01:00:00.000000000 +0100
1969 +++ linux-2.6.19.2.dev/drivers/serial/crisv32.c 2007-01-05 09:59:53.000000000 +0100
1970 @@ -0,0 +1,2333 @@
1971 +/* $Id: crisv32.c,v 1.78 2007/01/05 08:59:53 starvik Exp $
1972 + *
1973 + * Serial port driver for the ETRAX FS chip
1974 + *
1975 + *    Copyright (C) 1998-2006  Axis Communications AB
1976 + *
1977 + *    Many, many authors. Based once upon a time on serial.c for 16x50.
1978 + *
1979 + *    Johan Adolfsson - port to ETRAX FS
1980 + *    Mikael Starvik - port to serial_core framework
1981 + *
1982 + */
1983 +
1984 +#include <linux/module.h>
1985 +#include <linux/init.h>
1986 +#include <linux/console.h>
1987 +#include <linux/types.h>
1988 +#include <linux/errno.h>
1989 +#include <linux/serial_core.h>
1990 +
1991 +#include <asm/io.h>
1992 +#include <asm/irq.h>
1993 +#include <asm/system.h>
1994 +#include <asm/uaccess.h>
1995 +
1996 +#include <asm/arch/dma.h>
1997 +#include <asm/arch/system.h>
1998 +#include <asm/arch/pinmux.h>
1999 +#include <asm/arch/hwregs/dma.h>
2000 +#include <asm/arch/hwregs/reg_rdwr.h>
2001 +#include <asm/arch/hwregs/ser_defs.h>
2002 +#include <asm/arch/hwregs/dma_defs.h>
2003 +#include <asm/arch/hwregs/gio_defs.h>
2004 +#include <asm/arch/hwregs/intr_vect_defs.h>
2005 +#include <asm/arch/hwregs/reg_map.h>
2006 +
2007 +#define UART_NR 5 /* 4 ports + dummy port */
2008 +#define SERIAL_RECV_DESCRIPTORS 8
2009 +
2010 +/* We only buffer 255 characters here, no need for more tx descriptors. */
2011 +#define SERIAL_TX_DESCRIPTORS 4
2012 +
2013 +/* Kept for experimental purposes. */
2014 +#define ETRAX_SER_FIFO_SIZE 1
2015 +#define SERIAL_DESCR_BUF_SIZE 256
2016 +#define regi_NULL 0
2017 +#define DMA_WAIT_UNTIL_RESET(inst)                     \
2018 +  do {                                                 \
2019 +       reg_dma_rw_stat r;                              \
2020 +       do {                                            \
2021 +               r = REG_RD(dma, (inst), rw_stat);       \
2022 +       } while (r.mode != regk_dma_rst);               \
2023 +  } while (0)
2024 +
2025 +/* Macro to set up control lines for a port. */
2026 +#define SETUP_PINS(port) \
2027 +       if (serial_cris_ports[port].used) { \
2028 +       if (strcmp(CONFIG_ETRAX_SER##port##_DTR_BIT, "")) \
2029 +               crisv32_io_get_name(&serial_cris_ports[port].dtr_pin, \
2030 +                                   CONFIG_ETRAX_SER##port##_DTR_BIT); \
2031 +       else \
2032 +               serial_cris_ports[port].dtr_pin = dummy_pin; \
2033 +       if (strcmp(CONFIG_ETRAX_SER##port##_DSR_BIT, "")) \
2034 +               crisv32_io_get_name(&serial_cris_ports[port].dsr_pin, \
2035 +                                   CONFIG_ETRAX_SER##port##_DSR_BIT); \
2036 +       else \
2037 +               serial_cris_ports[port].dsr_pin = dummy_pin; \
2038 +       if (strcmp(CONFIG_ETRAX_SER##port##_RI_BIT, "")) \
2039 +               crisv32_io_get_name(&serial_cris_ports[port].ri_pin, \
2040 +                                   CONFIG_ETRAX_SER##port##_RI_BIT); \
2041 +       else \
2042 +               serial_cris_ports[port].ri_pin = dummy_pin; \
2043 +       if (strcmp(CONFIG_ETRAX_SER##port##_CD_BIT, "")) \
2044 +               crisv32_io_get_name(&serial_cris_ports[port].cd_pin, \
2045 +                                   CONFIG_ETRAX_SER##port##_CD_BIT); \
2046 +       else \
2047 +               serial_cris_ports[port].cd_pin = dummy_pin; \
2048 +       }
2049 +
2050 +/* Set a serial port register if anything has changed. */
2051 +#define MODIFY_REG(instance, reg, var)                 \
2052 +  if (REG_RD_INT(ser, instance, reg)                   \
2053 +      != REG_TYPE_CONV(int, reg_ser_##reg, var))       \
2054 +      REG_WR(ser, instance, reg, var);
2055 +
2056 +/*
2057 + * Regarding RS485 operation in crisv32 serial driver.
2058 + * ---------------------------------------------------
2059 + * RS485 can be run in two modes, full duplex using four wires (485FD) and
2060 + * half duplex using two wires (485HD). The default mode of each serial port 
2061 + * is configured in the kernel configuration. The available modes are: 
2062 + * RS-232, RS-485 half duplex, and RS-485 full duplex. 
2063 + *
2064 + * In the 485HD mode the direction of the data bus must be able to switch.
2065 + * The direction of the transceiver is controlled by the RTS signal. Hence 
2066 + * the auto_rts function in the ETRAX FS chip is enabled in this mode, which 
2067 + * automatically toggles RTS when transmitting. The initial direction of the 
2068 + * port is receiving.
2069 + *
2070 + * In the 485FD mode two transceivers will be used, one in each direction. 
2071 + * Usually the hardware can handle both 485HD and 485FD, which implies that 
2072 + * one of the transceivers can change direction. Consequently that transceiver 
2073 + * must be tied to operate in the opposite direction of the other one, setting
2074 + * and keeping RTS to a fixed value does this.
2075 + *
2076 + * There are two special "ioctls" that can configure the ports. These two are 
2077 + * kept for backward compatibility with older applications. The effects of using
2078 + * them are described below:
2079 + * The TIOCSERSETRS485:
2080 + * This ioctl sets a serial port in 232 mode to 485HD mode or vice versa. The
2081 + * state of the port is kept when closing the port. Note that this ioctl has no
2082 + * effect on a serial port in the 485FD mode.
2083 + * The TIOCSERWRRS485:
2084 + * This ioctl sets a serial port in 232 mode to 485HD mode and writes the data
2085 + * "included" in the ioctl to the port. The port will then stay in 485HD mode.
2086 + * Using this ioctl on a serial port in the 485HD mode will transmit the data
2087 + * without changing the mode. Using this ioctl on a serial port in 485FD mode
2088 + * will not change the mode and simply send the data using the 485FD mode.
2089 + */
2090 +
2091 +#define TYPE_232 0
2092 +#define TYPE_485HD 1
2093 +#define TYPE_485FD 2
2094 +
2095 +struct etrax_recv_buffer {
2096 +       struct etrax_recv_buffer *next;
2097 +       unsigned short length;
2098 +       unsigned char error;
2099 +       unsigned char pad;
2100 +
2101 +       unsigned char buffer[0];
2102 +};
2103 +
2104 +struct uart_cris_port {
2105 +       struct uart_port        port;
2106 +
2107 +       int initialized;
2108 +       int used;
2109 +       int irq;
2110 +
2111 +       /* Used to check if port enabled as well by testing for zero. */
2112 +       reg_scope_instances     regi_ser;
2113 +       reg_scope_instances     regi_dmain;
2114 +       reg_scope_instances     regi_dmaout;
2115 +
2116 +       struct crisv32_iopin    dtr_pin;
2117 +       struct crisv32_iopin    dsr_pin;
2118 +       struct crisv32_iopin    ri_pin;
2119 +       struct crisv32_iopin    cd_pin;
2120 +
2121 +       struct dma_descr_context tr_context_descr
2122 +               __attribute__ ((__aligned__(32)));
2123 +       struct dma_descr_data    tr_descr[SERIAL_TX_DESCRIPTORS]
2124 +               __attribute__ ((__aligned__(32)));
2125 +       struct dma_descr_context rec_context_descr
2126 +               __attribute__ ((__aligned__(32)));
2127 +       struct dma_descr_data    rec_descr[SERIAL_RECV_DESCRIPTORS]
2128 +               __attribute__ ((__aligned__(32)));
2129 +
2130 +       /* This is the first one in the list the HW is working on now. */
2131 +       struct dma_descr_data*   first_tx_descr;
2132 +
2133 +       /* This is the last one in the list the HW is working on now. */
2134 +       struct dma_descr_data*   last_tx_descr;
2135 +
2136 +       /* This is how many characters the HW is working on now. */
2137 +       unsigned int             tx_pending_chars;
2138 +
2139 +       int                      tx_started;
2140 +       unsigned int             cur_rec_descr;
2141 +       struct etrax_recv_buffer *first_recv_buffer;
2142 +       struct etrax_recv_buffer *last_recv_buffer;
2143 +
2144 +       unsigned int            recv_cnt;
2145 +       unsigned int            max_recv_cnt;
2146 +
2147 +       /* The time for 1 char, in usecs. */
2148 +       unsigned long           char_time_usec;
2149 +
2150 +       /* Last tx usec in the jiffies. */
2151 +       unsigned long           last_tx_active_usec;
2152 +
2153 +       /* Last tx time in jiffies. */
2154 +       unsigned long           last_tx_active;
2155 +
2156 +       /* Last rx usec in the jiffies. */
2157 +       unsigned long           last_rx_active_usec;
2158 +
2159 +       /* Last rx time in jiffies. */
2160 +       unsigned long           last_rx_active;
2161 +
2162 +#ifdef CONFIG_ETRAX_RS485
2163 +       /* RS-485 support, duh. */
2164 +       struct rs485_control    rs485;
2165 +#endif
2166 +       int                     port_type;
2167 +};
2168 +
2169 +extern struct uart_driver serial_cris_driver;
2170 +static struct uart_port *console_port;
2171 +static int console_baud = 115200;
2172 +static struct uart_cris_port serial_cris_ports[UART_NR] = {
2173 +{
2174 +#ifdef CONFIG_ETRAX_SERIAL_PORT0
2175 +       .used        = 1,
2176 +       .irq         = SER0_INTR_VECT,
2177 +       .regi_ser    = regi_ser0,
2178 +       /*
2179 +        * We initialize the dma stuff like this to get a compiler error
2180 +        * if a CONFIG is missing
2181 +        */
2182 +       .regi_dmain  =
2183 +#  ifdef CONFIG_ETRAX_SERIAL_PORT0_DMA7_IN
2184 +                      regi_dma7,
2185 +#  endif
2186 +#  ifdef CONFIG_ETRAX_SERIAL_PORT0_NO_DMA_IN
2187 +                      regi_NULL,
2188 +#  endif
2189 +
2190 +       .regi_dmaout =
2191 +#  ifdef CONFIG_ETRAX_SERIAL_PORT0_DMA6_OUT
2192 +                      regi_dma6,
2193 +#  endif
2194 +#  ifdef CONFIG_ETRAX_SERIAL_PORT0_NO_DMA_OUT
2195 +                      regi_NULL,
2196 +#  endif
2197 +
2198 +#  ifdef CONFIG_ETRAX_RS485
2199 +#    ifdef CONFIG_ETRAX_SERIAL_PORT0_TYPE_485HD
2200 +       .port_type = TYPE_485HD,
2201 +#    endif
2202 +#    ifdef CONFIG_ETRAX_SERIAL_PORT0_TYPE_485FD
2203 +       .port_type = TYPE_485FD,
2204 +#    endif
2205 +#  endif
2206 +#else
2207 +       .regi_ser    = regi_NULL,
2208 +       .regi_dmain  = regi_NULL,
2209 +       .regi_dmaout = regi_NULL,
2210 +#endif
2211 +},  /* ttyS0 */
2212 +{
2213 +#ifdef CONFIG_ETRAX_SERIAL_PORT1
2214 +       .used        = 1,
2215 +       .irq         = SER1_INTR_VECT,
2216 +       .regi_ser    = regi_ser1,
2217 +       .regi_dmain  =
2218 +#  ifdef CONFIG_ETRAX_SERIAL_PORT1_DMA5_IN
2219 +                      regi_dma5,
2220 +#  endif
2221 +#  ifdef CONFIG_ETRAX_SERIAL_PORT1_NO_DMA_IN
2222 +                      regi_NULL,
2223 +#  endif
2224 +
2225 +       .regi_dmaout =
2226 +#  ifdef CONFIG_ETRAX_SERIAL_PORT1_DMA4_OUT
2227 +                      regi_dma4,
2228 +#  endif
2229 +#  ifdef CONFIG_ETRAX_SERIAL_PORT1_NO_DMA_OUT
2230 +                      regi_NULL,
2231 +#  endif
2232 +
2233 +#  ifdef CONFIG_ETRAX_RS485
2234 +#    ifdef CONFIG_ETRAX_SERIAL_PORT1_TYPE_485HD
2235 +       .port_type = TYPE_485HD,
2236 +#    endif
2237 +#    ifdef CONFIG_ETRAX_SERIAL_PORT1_TYPE_485FD
2238 +       .port_type = TYPE_485FD,
2239 +#    endif
2240 +#  endif
2241 +#else
2242 +       .regi_ser    = regi_NULL,
2243 +       .regi_dmain  = regi_NULL,
2244 +       .regi_dmaout = regi_NULL,
2245 +#endif
2246 +},  /* ttyS1 */
2247 +{
2248 +#ifdef CONFIG_ETRAX_SERIAL_PORT2
2249 +       .used       = 1,
2250 +       .irq        = SER2_INTR_VECT,
2251 +       .regi_ser    = regi_ser2,
2252 +       .regi_dmain  =
2253 +#  ifdef CONFIG_ETRAX_SERIAL_PORT2_DMA3_IN
2254 +                      regi_dma3,
2255 +#  endif
2256 +#  ifdef CONFIG_ETRAX_SERIAL_PORT2_NO_DMA_IN
2257 +                      regi_NULL,
2258 +#  endif
2259 +
2260 +        .regi_dmaout =
2261 +#  ifdef CONFIG_ETRAX_SERIAL_PORT2_DMA2_OUT
2262 +                      regi_dma2,
2263 +#  endif
2264 +#  ifdef CONFIG_ETRAX_SERIAL_PORT2_NO_DMA_OUT
2265 +                      regi_NULL,
2266 +#  endif
2267 +
2268 +#  ifdef CONFIG_ETRAX_RS485
2269 +#    ifdef CONFIG_ETRAX_SERIAL_PORT2_TYPE_485HD
2270 +       .port_type = TYPE_485HD,
2271 +#    endif
2272 +#    ifdef CONFIG_ETRAX_SERIAL_PORT2_TYPE_485FD
2273 +       .port_type = TYPE_485FD,
2274 +#    endif
2275 +#  endif
2276 +#else
2277 +       .regi_ser    = regi_NULL,
2278 +       .regi_dmain  = regi_NULL,
2279 +       .regi_dmaout = regi_NULL,
2280 +#endif
2281 +},  /* ttyS2 */
2282 +{
2283 +#ifdef CONFIG_ETRAX_SERIAL_PORT3
2284 +       .used       = 1,
2285 +       .irq        = SER3_INTR_VECT,
2286 +       .regi_ser    = regi_ser3,
2287 +       .regi_dmain  =
2288 +#  ifdef CONFIG_ETRAX_SERIAL_PORT3_DMA9_IN
2289 +                      regi_dma9,
2290 +#  endif
2291 +#  ifdef CONFIG_ETRAX_SERIAL_PORT3_NO_DMA_IN
2292 +                      regi_NULL,
2293 +#  endif
2294 +
2295 +        .regi_dmaout =
2296 +#  ifdef CONFIG_ETRAX_SERIAL_PORT3_DMA8_OUT
2297 +                      regi_dma8,
2298 +#  endif
2299 +#  ifdef CONFIG_ETRAX_SERIAL_PORT3_NO_DMA_OUT
2300 +                      regi_NULL,
2301 +#  endif
2302 +
2303 +#  ifdef CONFIG_ETRAX_RS485
2304 +#    ifdef CONFIG_ETRAX_SERIAL_PORT3_TYPE_485HD
2305 +       .port_type = TYPE_485HD,
2306 +#    endif
2307 +#    ifdef CONFIG_ETRAX_SERIAL_PORT3_TYPE_485FD
2308 +       .port_type = TYPE_485FD,
2309 +#    endif
2310 +#  endif
2311 +#else
2312 +       .regi_ser    = regi_NULL,
2313 +       .regi_dmain  = regi_NULL,
2314 +       .regi_dmaout = regi_NULL,
2315 +#endif
2316 +},  /* ttyS3 */
2317 +{
2318 +#ifdef CONFIG_ETRAX_DEBUG_PORT_NULL
2319 +       .used        = 1,
2320 +#endif
2321 +       .regi_ser    = regi_NULL
2322 +}   /* Dummy console port */
2323 +
2324 +};
2325 +
2326 +/* Dummy pin used for unused CD, DSR, DTR and RI signals. */
2327 +static unsigned long io_dummy;
2328 +static struct crisv32_ioport dummy_port =
2329 +{
2330 +       &io_dummy,
2331 +       &io_dummy,
2332 +       &io_dummy,
2333 +       18
2334 +};
2335 +static struct crisv32_iopin dummy_pin =
2336 +{
2337 +       &dummy_port,
2338 +       0
2339 +};
2340 +
2341 +static int selected_console =
2342 +#if defined(CONFIG_ETRAX_DEBUG_PORT0)
2343 +0;
2344 +#elif defined(CONFIG_ETRAX_DEBUG_PORT1)
2345 +1;
2346 +#elif defined(CONFIG_ETRAX_DEBUG_PORT2)
2347 +2;
2348 +#elif defined(CONFIG_ETRAX_DEBUG_PORT3)
2349 +3;
2350 +#else  /* CONFIG_ETRAX_DEBUG_PORT_NULL */
2351 +4;
2352 +#endif
2353 +
2354 +extern void reset_watchdog(void);
2355 +
2356 +/*
2357 + * Interrupts are disabled on entering
2358 + */
2359 +static void
2360 +cris_console_write(struct console *co, const char *s, unsigned int count)
2361 +{
2362 +       struct uart_cris_port *up;
2363 +       int i;
2364 +       reg_ser_r_stat_din stat;
2365 +       reg_ser_rw_tr_dma_en tr_dma_en, old;
2366 +
2367 +       up = &serial_cris_ports[selected_console];
2368 +
2369 +       /*
2370 +        * This function isn't covered by the struct uart_ops, so we
2371 +        * have to check manually that the port really is there,
2372 +        * configured and live.
2373 +        */
2374 +       if (!up->regi_ser)
2375 +               return;
2376 +
2377 +       /* Switch to manual mode. */
2378 +       tr_dma_en = old = REG_RD (ser, up->regi_ser, rw_tr_dma_en);
2379 +       if (tr_dma_en.en == regk_ser_yes) {
2380 +               tr_dma_en.en = regk_ser_no;
2381 +               REG_WR(ser, up->regi_ser, rw_tr_dma_en, tr_dma_en);
2382 +       }
2383 +
2384 +       /* Send data. */
2385 +       for (i = 0; i < count; i++) {
2386 +               /* LF -> CRLF */
2387 +               if (s[i] == '\n') {
2388 +                       do {
2389 +                               stat = REG_RD (ser, up->regi_ser, r_stat_din);
2390 +                       } while (!stat.tr_rdy);
2391 +                       REG_WR_INT (ser, up->regi_ser, rw_dout, '\r');
2392 +               }
2393 +               /* Wait until transmitter is ready and send. */
2394 +               do {
2395 +                       stat = REG_RD (ser, up->regi_ser, r_stat_din);
2396 +               } while (!stat.tr_rdy);
2397 +               REG_WR_INT (ser, up->regi_ser, rw_dout, s[i]);
2398 +
2399 +               /* Feed watchdog, because this may take looong time. */
2400 +               reset_watchdog();
2401 +       }
2402 +
2403 +       /* Restore mode. */
2404 +       if (tr_dma_en.en != old.en)
2405 +               REG_WR(ser, up->regi_ser, rw_tr_dma_en, old);
2406 +}
2407 +
2408 +static void cris_serial_port_init(struct uart_port *port, int line);
2409 +static int __init
2410 +cris_console_setup(struct console *co, char *options)
2411 +{
2412 +       struct uart_port *port;
2413 +       int baud = 115200;
2414 +       int bits = 8;
2415 +       int parity = 'n';
2416 +       int flow = 'n';
2417 +
2418 +       if (co->index >= UART_NR)
2419 +               co->index = 0;
2420 +       if (options)
2421 +               selected_console = co->index;
2422 +       port = &serial_cris_ports[selected_console].port;
2423 +        console_port = port;
2424 +
2425 +       if (options)
2426 +               uart_parse_options(options, &baud, &parity, &bits, &flow);
2427 +       console_baud = baud;
2428 +       cris_serial_port_init(port, selected_console);
2429 +       co->index = port->line;
2430 +       uart_set_options(port, co, baud, parity, bits, flow);
2431 +
2432 +       return 0;
2433 +}
2434 +
2435 +static struct tty_driver*
2436 +cris_console_device(struct console* co, int *index)
2437 +{
2438 +       struct uart_driver *p = co->data;
2439 +       *index = selected_console;
2440 +       return p->tty_driver;
2441 +}
2442 +
2443 +static struct console cris_console = {
2444 +       .name           = "ttyS",
2445 +       .write          = cris_console_write,
2446 +       .device         = cris_console_device,
2447 +       .setup          = cris_console_setup,
2448 +       .flags          = CON_PRINTBUFFER,
2449 +       .index          = -1,
2450 +       .data           = &serial_cris_driver,
2451 +};
2452 +
2453 +#define SERIAL_CRIS_CONSOLE    &cris_console
2454 +
2455 +struct uart_driver serial_cris_driver = {
2456 +       .owner                  = THIS_MODULE,
2457 +       .driver_name            = "serial",
2458 +       .dev_name               = "ttyS",
2459 +       .major                  = TTY_MAJOR,
2460 +       .minor                  = 64,
2461 +       .nr                     = UART_NR,
2462 +       .cons                   = SERIAL_CRIS_CONSOLE,
2463 +};
2464 +
2465 +static int inline crisv32_serial_get_rts(struct uart_cris_port *up)
2466 +{
2467 +       reg_scope_instances regi_ser = up->regi_ser;
2468 +       /*
2469 +        * Return what the user has controlled rts to or
2470 +        * what the pin is? (if auto_rts is used it differs during tx)
2471 +        */
2472 +       reg_ser_r_stat_din rstat = REG_RD(ser, regi_ser, r_stat_din);
2473 +       return !(rstat.rts_n == regk_ser_active);
2474 +}
2475 +
2476 +/*
2477 + * A set = 0 means 3.3V on the pin, bitvalue: 0=active, 1=inactive
2478 + *                                            0=0V    , 1=3.3V
2479 + */
2480 +static inline void crisv32_serial_set_rts(struct uart_cris_port *up, int set)
2481 +{
2482 +       reg_scope_instances regi_ser = up->regi_ser;
2483 +
2484 +#ifdef CONFIG_ETRAX_RS485
2485 +       /* Never toggle RTS if port is in 485 mode. If port is in 485FD mode we
2486 +        * do not want to send with the reciever and for 485HD mode auto_rts
2487 +        * take care of the RTS for us. 
2488 +        */
2489 +       if (!up->rs485.enabled) {
2490 +#else
2491 +       {
2492 +#endif
2493 +               unsigned long flags;
2494 +               reg_ser_rw_rec_ctrl rec_ctrl;
2495 +
2496 +               local_irq_save(flags);
2497 +               rec_ctrl = REG_RD(ser, regi_ser, rw_rec_ctrl);
2498 +               if (set)
2499 +                       rec_ctrl.rts_n = regk_ser_active;
2500 +               else
2501 +                       rec_ctrl.rts_n = regk_ser_inactive;
2502 +               REG_WR(ser, regi_ser, rw_rec_ctrl, rec_ctrl);
2503 +               local_irq_restore(flags);
2504 +       }
2505 +}
2506 +
2507 +/* Input */
2508 +static int inline crisv32_serial_get_cts(struct uart_cris_port *up)
2509 +{
2510 +       reg_scope_instances regi_ser = up->regi_ser;
2511 +       reg_ser_r_stat_din rstat = REG_RD(ser, regi_ser, r_stat_din);
2512 +       return (rstat.cts_n == regk_ser_active);
2513 +}
2514 +
2515 +/*
2516 + * Send a single character for XON/XOFF purposes.  We do it in this separate
2517 + * function instead of the alternative support port.x_char, in the ...start_tx
2518 + * function, so we don't mix up this case with possibly enabling transmission
2519 + * of queued-up data (in case that's disabled after *receiving* an XOFF or
2520 + * negative CTS).  This function is used for both DMA and non-DMA case; see HW
2521 + * docs specifically blessing sending characters manually when DMA for
2522 + * transmission is enabled and running.  We may be asked to transmit despite
2523 + * the transmitter being disabled by a ..._stop_tx call so we need to enable
2524 + * it temporarily but restore the state afterwards.
2525 + *
2526 + * Beware: I'm not sure how the RS-485 stuff is supposed to work.  Using
2527 + * XON/XOFF seems problematic if there are several controllers, but if it's
2528 + * actually RS-422 (multi-drop; one sender and multiple receivers), it might
2529 + * Just Work, so don't bail out just because it looks a little suspicious.
2530 + */
2531 +
2532 +void serial_cris_send_xchar(struct uart_port *port, char ch)
2533 +{
2534 +       struct uart_cris_port *up = (struct uart_cris_port *)port;
2535 +       reg_ser_rw_dout dout = { .data = ch };
2536 +       reg_ser_rw_ack_intr ack_intr = { .tr_rdy = regk_ser_yes };
2537 +       reg_ser_r_stat_din rstat;
2538 +       reg_ser_rw_tr_ctrl prev_tr_ctrl, tr_ctrl;
2539 +       reg_scope_instances regi_ser = up->regi_ser;
2540 +       unsigned long flags;
2541 +
2542 +       /*
2543 +        * Wait for tr_rdy in case a character is already being output.  Make
2544 +        * sure we have integrity between the register reads and the writes
2545 +        * below, but don't busy-wait with interrupts off and the port lock
2546 +        * taken.
2547 +        */
2548 +       spin_lock_irqsave(&port->lock, flags);
2549 +       do {
2550 +               spin_unlock_irqrestore(&port->lock, flags);
2551 +               spin_lock_irqsave(&port->lock, flags);
2552 +               prev_tr_ctrl = tr_ctrl = REG_RD(ser, regi_ser, rw_tr_ctrl);
2553 +               rstat = REG_RD(ser, regi_ser, r_stat_din);
2554 +       } while (!rstat.tr_rdy);
2555 +
2556 +       /*
2557 +        * Ack an interrupt if one was just issued for the previous character
2558 +        * that was output.  This is required for non-DMA as the interrupt is
2559 +        * used as the only indicator that the transmitter is ready and it
2560 +        * isn't while this x_char is being transmitted.
2561 +        */
2562 +       REG_WR(ser, regi_ser, rw_ack_intr, ack_intr);
2563 +
2564 +       /* Enable the transmitter in case it was disabled. */
2565 +       tr_ctrl.stop = 0;
2566 +       REG_WR(ser, regi_ser, rw_tr_ctrl, tr_ctrl);
2567 +
2568 +       /*
2569 +        * Finally, send the blessed character; nothing should stop it now,
2570 +        * except for an xoff-detected state, which we'll handle below.
2571 +        */
2572 +       REG_WR(ser, regi_ser, rw_dout, dout);
2573 +       up->port.icount.tx++;
2574 +
2575 +       /* There might be an xoff state to clear. */
2576 +       rstat = REG_RD(ser, up->regi_ser, r_stat_din);
2577 +
2578 +       /*
2579 +        * Clear any xoff state that *may* have been there to
2580 +        * inhibit transmission of the character.
2581 +        */
2582 +       if (rstat.xoff_detect) {
2583 +               reg_ser_rw_xoff_clr xoff_clr = { .clr = 1 };
2584 +               REG_WR(ser, regi_ser, rw_xoff_clr, xoff_clr);
2585 +               reg_ser_rw_tr_dma_en tr_dma_en
2586 +                       = REG_RD(ser, regi_ser, rw_tr_dma_en);
2587 +
2588 +               /*
2589 +                * If we had an xoff state but cleared it, instead sneak in a
2590 +                * disabled state for the transmitter, after the character we
2591 +                * sent.  Thus we keep the port disabled, just as if the xoff
2592 +                * state was still in effect (or actually, as if stop_tx had
2593 +                * been called, as we stop DMA too).
2594 +                */
2595 +               prev_tr_ctrl.stop = 1;
2596 +
2597 +               tr_dma_en.en = 0;
2598 +               REG_WR(ser, regi_ser, rw_tr_dma_en, tr_dma_en);
2599 +       }
2600 +
2601 +       /* Restore "previous" enabled/disabled state of the transmitter. */
2602 +       REG_WR(ser, regi_ser, rw_tr_ctrl, prev_tr_ctrl);
2603 +
2604 +       spin_unlock_irqrestore(&port->lock, flags);
2605 +}
2606 +
2607 +static void transmit_chars_dma(struct uart_cris_port *up);
2608 +
2609 +/*
2610 + * Do not spin_lock_irqsave or disable interrupts by other means here; it's
2611 + * already done by the caller.
2612 + */
2613 +
2614 +static void serial_cris_start_tx(struct uart_port *port)
2615 +{
2616 +       struct uart_cris_port *up = (struct uart_cris_port *)port;
2617 +       reg_scope_instances regi_ser = up->regi_ser;
2618 +       reg_ser_rw_tr_ctrl tr_ctrl;
2619 +
2620 +       tr_ctrl = REG_RD(ser, regi_ser, rw_tr_ctrl);
2621 +       tr_ctrl.stop = regk_ser_no;
2622 +       REG_WR(ser, regi_ser, rw_tr_ctrl, tr_ctrl);
2623 +       if (!up->regi_dmaout) {
2624 +               reg_ser_rw_intr_mask intr_mask =
2625 +                       REG_RD(ser, regi_ser, rw_intr_mask);
2626 +               intr_mask.tr_rdy = regk_ser_yes;
2627 +               REG_WR(ser, regi_ser, rw_intr_mask, intr_mask);
2628 +       } else {
2629 +               /*
2630 +                * We're called possibly to re-enable transmission after it
2631 +                * has been disabled.  If so, DMA needs to be re-enabled.
2632 +                */
2633 +               reg_ser_rw_tr_dma_en tr_dma_en = { .en = 1 };
2634 +               REG_WR(ser, regi_ser, rw_tr_dma_en, tr_dma_en);
2635 +               transmit_chars_dma(up);
2636 +       }
2637 +}
2638 +
2639 +/*
2640 + * This function handles both the DMA and non-DMA case by ordering the
2641 + * transmitter to stop of after the current character.  We don't need to wait
2642 + * for any such character to be completely transmitted; we do that where it
2643 + * matters, like in serial_cris_set_termios.  Don't busy-wait here; see
2644 + * Documentation/serial/driver: this function is called within
2645 + * spin_lock_irq{,save} and thus separate ones would be disastrous (when SMP).
2646 + * There's no documented need to set the txd pin to any particular value;
2647 + * break setting is controlled solely by serial_cris_break_ctl.
2648 + */
2649 +
2650 +static void serial_cris_stop_tx(struct uart_port *port)
2651 +{
2652 +       struct uart_cris_port *up = (struct uart_cris_port *)port;
2653 +       reg_scope_instances regi_ser = up->regi_ser;
2654 +       reg_ser_rw_tr_ctrl tr_ctrl;
2655 +       reg_ser_rw_intr_mask intr_mask;
2656 +       reg_ser_rw_tr_dma_en tr_dma_en = {0};
2657 +       reg_ser_rw_xoff_clr xoff_clr = {0};
2658 +
2659 +       /*
2660 +        * For the non-DMA case, we'd get a tr_rdy interrupt that we're not
2661 +        * interested in as we're not transmitting any characters.  For the
2662 +        * DMA case, that interrupt is already turned off, but no reason to
2663 +        * waste code on conditionals here.
2664 +        */
2665 +       intr_mask = REG_RD(ser, regi_ser, rw_intr_mask);
2666 +       intr_mask.tr_rdy = regk_ser_no;
2667 +       REG_WR(ser, regi_ser, rw_intr_mask, intr_mask);
2668 +
2669 +       tr_ctrl = REG_RD(ser, regi_ser, rw_tr_ctrl);
2670 +       tr_ctrl.stop = 1;
2671 +       REG_WR(ser, regi_ser, rw_tr_ctrl, tr_ctrl);
2672 +
2673 +       /*
2674 +        * Always clear possible hardware xoff-detected state here, no need to
2675 +        * unnecessary consider mctrl settings and when they change.  We clear
2676 +        * it here rather than in start_tx: both functions are called as the
2677 +        * effect of XOFF processing, but start_tx is also called when upper
2678 +        * levels tell the driver that there are more characters to send, so
2679 +        * avoid adding code there.
2680 +        */
2681 +       xoff_clr.clr = 1;
2682 +       REG_WR(ser, regi_ser, rw_xoff_clr, xoff_clr);
2683 +
2684 +       /*
2685 +        * Disable transmitter DMA, so that if we're in XON/XOFF, we can send
2686 +        * those single characters without also giving go-ahead for queued up
2687 +        * DMA data.
2688 +        */
2689 +       tr_dma_en.en = 0;
2690 +       REG_WR(ser, regi_ser, rw_tr_dma_en, tr_dma_en);
2691 +}
2692 +
2693 +static void serial_cris_stop_rx(struct uart_port *port)
2694 +{
2695 +       struct uart_cris_port *up = (struct uart_cris_port *)port;
2696 +       reg_scope_instances regi_ser = up->regi_ser;
2697 +       reg_ser_rw_rec_ctrl rec_ctrl = REG_RD(ser, regi_ser, rw_rec_ctrl);
2698 +
2699 +       rec_ctrl.en = regk_ser_no;
2700 +       REG_WR(ser, regi_ser, rw_rec_ctrl, rec_ctrl);
2701 +}
2702 +
2703 +static void serial_cris_enable_ms(struct uart_port *port)
2704 +{
2705 +}
2706 +
2707 +static void check_modem_status(struct uart_cris_port *up)
2708 +{
2709 +}
2710 +
2711 +static unsigned int serial_cris_tx_empty(struct uart_port *port)
2712 +{
2713 +       struct uart_cris_port *up = (struct uart_cris_port *)port;
2714 +       unsigned long flags;
2715 +       unsigned int ret;
2716 +       reg_ser_r_stat_din rstat = {0};
2717 +
2718 +       spin_lock_irqsave(&up->port.lock, flags);
2719 +       if (up->regi_dmaout) {
2720 +               /*
2721 +                * For DMA, before looking at r_stat, we need to check that we
2722 +                * either haven't actually started or that end-of-list is
2723 +                * reached, else a tr_empty indication is just an internal
2724 +                * state.  The caller qualifies, if needed, that the
2725 +                * port->info.xmit buffer is empty, so we don't need to
2726 +                * check that.
2727 +                */
2728 +               reg_dma_rw_stat status = REG_RD(dma, up->regi_dmaout, rw_stat);
2729 +
2730 +               if (!up->tx_started) {
2731 +                       ret = 1;
2732 +                       goto done;
2733 +               }
2734 +
2735 +               if (status.list_state != regk_dma_data_at_eol) {
2736 +                       ret = 0;
2737 +                       goto done;
2738 +               }
2739 +       }
2740 +
2741 +       rstat = REG_RD(ser, up->regi_ser, r_stat_din);
2742 +       ret = rstat.tr_empty ? TIOCSER_TEMT : 0;
2743 +
2744 + done:
2745 +       spin_unlock_irqrestore(&up->port.lock, flags);
2746 +       return ret;
2747 +}
2748 +static unsigned int serial_cris_get_mctrl(struct uart_port *port)
2749 +{
2750 +       struct uart_cris_port *up = (struct uart_cris_port *)port;
2751 +       unsigned int ret;
2752 +
2753 +       ret = 0;
2754 +        if (crisv32_serial_get_rts(up))
2755 +               ret |= TIOCM_RTS;
2756 +       if (crisv32_io_rd(&up->dtr_pin))
2757 +               ret |= TIOCM_DTR;
2758 +       if (crisv32_io_rd(&up->cd_pin))
2759 +               ret |= TIOCM_CD;
2760 +       if (crisv32_io_rd(&up->ri_pin))
2761 +               ret |= TIOCM_RI;
2762 +       if (!crisv32_io_rd(&up->dsr_pin))
2763 +               ret |= TIOCM_DSR;
2764 +       if (crisv32_serial_get_cts(up))
2765 +               ret |= TIOCM_CTS;
2766 +       return ret;
2767 +}
2768 +
2769 +static void serial_cris_set_mctrl(struct uart_port *port, unsigned int mctrl)
2770 +{
2771 +       struct uart_cris_port *up = (struct uart_cris_port *)port;
2772 +
2773 +       crisv32_serial_set_rts(up, mctrl & TIOCM_RTS ? 1 : 0);
2774 +       crisv32_io_set(&up->dtr_pin, mctrl & TIOCM_DTR ? 1 : 0);
2775 +       crisv32_io_set(&up->ri_pin, mctrl & TIOCM_RNG ? 1 : 0);
2776 +       crisv32_io_set(&up->cd_pin, mctrl & TIOCM_CD ? 1 : 0);
2777 +}
2778 +
2779 +static void serial_cris_break_ctl(struct uart_port *port, int break_state)
2780 +{
2781 +       struct uart_cris_port *up = (struct uart_cris_port *)port;
2782 +       unsigned long flags;
2783 +       reg_ser_rw_tr_ctrl tr_ctrl;
2784 +       reg_ser_rw_tr_dma_en tr_dma_en;
2785 +       reg_ser_rw_intr_mask intr_mask;
2786 +
2787 +       spin_lock_irqsave(&up->port.lock, flags);
2788 +       tr_ctrl = REG_RD(ser, up->regi_ser, rw_tr_ctrl);
2789 +       tr_dma_en = REG_RD(ser, up->regi_ser, rw_tr_dma_en);
2790 +       intr_mask = REG_RD(ser, up->regi_ser, rw_intr_mask);
2791 +
2792 +       if (break_state != 0) { /* Send break */
2793 +               /*
2794 +                * We need to disable DMA (if used) or tr_rdy interrupts if no
2795 +                * DMA.  No need to make this conditional on use of DMA;
2796 +                * disabling will be a no-op for the other mode.
2797 +                */
2798 +               intr_mask.tr_rdy = regk_ser_no;
2799 +               tr_dma_en.en = 0;
2800 +
2801 +               /*
2802 +                * Stop transmission and set the txd pin to 0 after the
2803 +                * current character.  The txd setting will take effect after
2804 +                * any current transmission has completed.
2805 +                */
2806 +               tr_ctrl.stop = 1;
2807 +               tr_ctrl.txd = 0;
2808 +       } else {
2809 +               /* Re-enable either transmit DMA or the serial interrupt. */
2810 +               if (up->regi_dmaout)
2811 +                       tr_dma_en.en = 1;
2812 +               else
2813 +                       intr_mask.tr_rdy = regk_ser_yes;
2814 +
2815 +
2816 +               tr_ctrl.stop = 0;
2817 +               tr_ctrl.txd = 1;
2818 +       }
2819 +       REG_WR(ser, up->regi_ser, rw_tr_ctrl, tr_ctrl);
2820 +       REG_WR(ser, up->regi_ser, rw_tr_dma_en, tr_dma_en);
2821 +       REG_WR(ser, up->regi_ser, rw_intr_mask, intr_mask);
2822 +
2823 +       spin_unlock_irqrestore(&up->port.lock, flags);
2824 +}
2825 +
2826 +/*
2827 + * The output DMA channel is free - use it to send as many chars as
2828 + * possible.
2829 + */
2830 +
2831 +static void
2832 +transmit_chars_dma(struct uart_cris_port *up)
2833 +{
2834 +       struct dma_descr_data *descr, *pending_descr, *dmapos;
2835 +       struct dma_descr_data *last_tx_descr;
2836 +       struct circ_buf *xmit = &up->port.info->xmit;
2837 +       unsigned int sentl = 0;
2838 +       reg_dma_rw_ack_intr ack_intr = { .data = regk_dma_yes };
2839 +       reg_dma_rw_stat status;
2840 +       reg_scope_instances regi_dmaout = up->regi_dmaout;
2841 +       unsigned int chars_in_q;
2842 +       unsigned int chars_to_send;
2843 +
2844 +       /* Acknowledge dma data descriptor irq, if there was one. */
2845 +       REG_WR(dma, regi_dmaout, rw_ack_intr, ack_intr);
2846 +
2847 +       /*
2848 +        * First get the amount of bytes sent during the last DMA transfer,
2849 +        * and update xmit accordingly.
2850 +        */
2851 +       status = REG_RD(dma, regi_dmaout, rw_stat);
2852 +       if (status.list_state == regk_dma_data_at_eol || !up->tx_started)
2853 +               dmapos = phys_to_virt((int)up->last_tx_descr->next);
2854 +       else
2855 +               dmapos = phys_to_virt(REG_RD_INT(dma, regi_dmaout, rw_data));
2856 +
2857 +       pending_descr = up->first_tx_descr;
2858 +       while (pending_descr != dmapos) {
2859 +               sentl += pending_descr->after - pending_descr->buf;
2860 +               pending_descr->after = pending_descr->buf = NULL;
2861 +               pending_descr = phys_to_virt((int)pending_descr->next);
2862 +        }
2863 +
2864 +       up->first_tx_descr = pending_descr;
2865 +       last_tx_descr = up->last_tx_descr;
2866 +
2867 +       /* Update stats. */
2868 +       up->port.icount.tx += sentl;
2869 +
2870 +       up->tx_pending_chars -= sentl;
2871 +
2872 +       /* Update xmit buffer. */
2873 +       xmit->tail = (xmit->tail + sentl) & (UART_XMIT_SIZE - 1);
2874 +
2875 +       /*
2876 +        * Find out the largest amount of consecutive bytes we want to send
2877 +        * now.
2878 +        */
2879 +       chars_in_q = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
2880 +
2881 +       if (chars_in_q == 0)
2882 +               /* Tell upper layers that we're now idle. */
2883 +               goto done;
2884 +
2885 +       /* Some of those characters are actually pending output. */
2886 +       chars_to_send = chars_in_q - up->tx_pending_chars;
2887 +
2888 +       /*
2889 +        * Clamp the new number of pending chars to the advertised
2890 +        * one.
2891 +        */
2892 +       if (chars_to_send + up->tx_pending_chars > up->port.fifosize)
2893 +               chars_to_send = up->port.fifosize - up->tx_pending_chars;
2894 +
2895 +       /* If we don't want to send any, we're done. */
2896 +       if (chars_to_send == 0)
2897 +               goto done;
2898 +
2899 +       descr = phys_to_virt((int)last_tx_descr->next);
2900 +
2901 +       /*
2902 +        * We can't send anything if we could make the condition in
2903 +        * the while-loop above (reaping finished descriptors) be met
2904 +        * immediately before the first iteration.  However, don't
2905 +        * mistake the full state for the empty state.
2906 +        */
2907 +       if ((descr == up->first_tx_descr && up->tx_pending_chars != 0)
2908 +           || descr->next == up->first_tx_descr)
2909 +               goto done;
2910 +
2911 +       /* Set up the descriptor for output. */
2912 +       descr->buf = (void*)virt_to_phys(xmit->buf + xmit->tail
2913 +                                        + up->tx_pending_chars);
2914 +       descr->after = descr->buf + chars_to_send;
2915 +       descr->eol = 1;
2916 +       descr->out_eop = 0;
2917 +       descr->intr = 1;
2918 +       descr->wait = 0;
2919 +       descr->in_eop = 0;
2920 +       descr->md = 0;
2921 +       /*
2922 +        * Make sure GCC doesn't move this eol clear before the eol set
2923 +        * above.
2924 +        */
2925 +       barrier();
2926 +       last_tx_descr->eol = 0;
2927 +
2928 +       up->last_tx_descr = descr;
2929 +       up->tx_pending_chars += chars_to_send;
2930 +
2931 +       if (!up->tx_started) {
2932 +               up->tx_started = 1;
2933 +               up->tr_context_descr.next = 0;
2934 +               up->tr_context_descr.saved_data
2935 +                       = (dma_descr_data*)virt_to_phys(descr);
2936 +               up->tr_context_descr.saved_data_buf = descr->buf;
2937 +               DMA_START_CONTEXT(regi_dmaout,
2938 +                                 virt_to_phys(&up->tr_context_descr));
2939 +        } else
2940 +               DMA_CONTINUE_DATA(regi_dmaout);
2941 +
2942 +       /* DMA is now running (hopefully). */
2943 +
2944 + done:
2945 +       if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
2946 +               uart_write_wakeup(&up->port);
2947 +}
2948 +
2949 +static void
2950 +transmit_chars_no_dma(struct uart_cris_port *up)
2951 +{
2952 +       int count;
2953 +       struct circ_buf *xmit = &up->port.info->xmit;
2954 +
2955 +       reg_scope_instances regi_ser = up->regi_ser;
2956 +       reg_ser_r_stat_din rstat;
2957 +       reg_ser_rw_ack_intr ack_intr = { .tr_rdy = regk_ser_yes };
2958 +
2959 +       if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) {
2960 +               /* No more to send, so disable the interrupt. */
2961 +               reg_ser_rw_intr_mask intr_mask;
2962 +               intr_mask = REG_RD(ser, regi_ser, rw_intr_mask);
2963 +               intr_mask.tr_rdy = 0;
2964 +               intr_mask.tr_empty = 0;
2965 +               REG_WR(ser, regi_ser, rw_intr_mask, intr_mask);
2966 +               return;
2967 +       }
2968 +
2969 +       count = ETRAX_SER_FIFO_SIZE;
2970 +       do {
2971 +               reg_ser_rw_dout dout = { .data = xmit->buf[xmit->tail] };
2972 +               REG_WR(ser, regi_ser, rw_dout, dout);
2973 +               REG_WR(ser, regi_ser, rw_ack_intr, ack_intr);
2974 +               xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE-1);
2975 +               up->port.icount.tx++;
2976 +               if (xmit->head == xmit->tail)
2977 +                       break;
2978 +               rstat = REG_RD(ser, regi_ser, r_stat_din);
2979 +       } while ((--count > 0) && rstat.tr_rdy);
2980 +
2981 +       if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
2982 +               uart_write_wakeup(&up->port);
2983 +} /* transmit_chars_no_dma */
2984 +
2985 +static struct etrax_recv_buffer *
2986 +alloc_recv_buffer(unsigned int size)
2987 +{
2988 +       struct etrax_recv_buffer *buffer;
2989 +
2990 +       if (!(buffer = kmalloc(sizeof *buffer + size, GFP_ATOMIC)))
2991 +               panic("%s: Could not allocate %d bytes buffer\n",
2992 +                     __FUNCTION__, size);
2993 +
2994 +       buffer->next = NULL;
2995 +       buffer->length = 0;
2996 +       buffer->error = TTY_NORMAL;
2997 +
2998 +       return buffer;
2999 +}
3000 +
3001 +static void
3002 +append_recv_buffer(struct uart_cris_port *up,
3003 +                  struct etrax_recv_buffer *buffer)
3004 +{
3005 +       unsigned long flags;
3006 +
3007 +       local_irq_save(flags);
3008 +
3009 +       if (!up->first_recv_buffer)
3010 +               up->first_recv_buffer = buffer;
3011 +       else
3012 +               up->last_recv_buffer->next = buffer;
3013 +
3014 +       up->last_recv_buffer = buffer;
3015 +
3016 +       up->recv_cnt += buffer->length;
3017 +       if (up->recv_cnt > up->max_recv_cnt)
3018 +               up->max_recv_cnt = up->recv_cnt;
3019 +
3020 +       local_irq_restore(flags);
3021 +}
3022 +
3023 +static int
3024 +add_char_and_flag(struct uart_cris_port *up, unsigned char data,
3025 +                 unsigned char flag)
3026 +{
3027 +       struct etrax_recv_buffer *buffer;
3028 +
3029 +       buffer = alloc_recv_buffer(4);
3030 +       buffer->length = 1;
3031 +       buffer->error = flag;
3032 +       buffer->buffer[0] = data;
3033 +
3034 +       append_recv_buffer(up, buffer);
3035 +
3036 +       up->port.icount.rx++;
3037 +
3038 +       return 1;
3039 +}
3040 +
3041 +static void
3042 +flush_to_flip_buffer(struct uart_cris_port *up)
3043 +{
3044 +       struct tty_struct *tty;
3045 +       struct etrax_recv_buffer *buffer;
3046 +
3047 +       tty = up->port.info->tty;
3048 +       if (!up->first_recv_buffer || !tty)
3049 +               return;
3050 +
3051 +       while ((buffer = up->first_recv_buffer)) {
3052 +               unsigned int count = (unsigned int)
3053 +                       tty_insert_flip_string(tty, buffer->buffer,
3054 +                                              buffer->length);
3055 +
3056 +               up->recv_cnt -= count;
3057 +
3058 +               if (count == buffer->length) {
3059 +                       up->first_recv_buffer = buffer->next;
3060 +                       kfree(buffer);
3061 +               } else {
3062 +                       buffer->length -= count;
3063 +                       memmove(buffer->buffer, buffer->buffer + count,
3064 +                               buffer->length);
3065 +                       buffer->error = TTY_NORMAL;
3066 +               }
3067 +       }
3068 +
3069 +       if (!up->first_recv_buffer)
3070 +               up->last_recv_buffer = NULL;
3071 +
3072 +       /* This call includes a check for low-latency. */
3073 +       tty_flip_buffer_push(tty);
3074 +}
3075 +
3076 +static unsigned int
3077 +handle_descr_data(struct uart_cris_port *up, struct dma_descr_data *descr,
3078 +                 unsigned int recvl)
3079 +{
3080 +       struct etrax_recv_buffer *buffer
3081 +               = phys_to_virt((unsigned long)descr->buf) - sizeof *buffer;
3082 +
3083 +       if (up->recv_cnt + recvl > 65536) {
3084 +               printk(KERN_ERR "Too much pending incoming data on %s!"
3085 +                      " Dropping %u bytes.\n", up->port.info->tty->name,
3086 +                      recvl);
3087 +               return 0;
3088 +       }
3089 +
3090 +       buffer->length = recvl;
3091 +
3092 +       append_recv_buffer(up, buffer);
3093 +
3094 +       flush_to_flip_buffer(up);
3095 +
3096 +       buffer = alloc_recv_buffer(SERIAL_DESCR_BUF_SIZE);
3097 +       descr->buf = (void*)virt_to_phys(buffer->buffer);
3098 +       descr->after = descr->buf + SERIAL_DESCR_BUF_SIZE;
3099 +
3100 +       return recvl;
3101 +}
3102 +
3103 +static unsigned int
3104 +handle_all_descr_data(struct uart_cris_port *up)
3105 +{
3106 +       struct dma_descr_data *descr
3107 +                = &up->rec_descr[(up->cur_rec_descr - 1)
3108 +                                 % SERIAL_RECV_DESCRIPTORS];
3109 +       struct dma_descr_data *prev_descr;
3110 +       unsigned int recvl;
3111 +       unsigned int ret = 0;
3112 +       reg_scope_instances regi_dmain = up->regi_dmain;
3113 +
3114 +       while (1) {
3115 +               prev_descr = descr;
3116 +               descr = &up->rec_descr[up->cur_rec_descr];
3117 +
3118 +               if (descr == phys_to_virt(REG_RD(dma, regi_dmain, rw_data)))
3119 +                       break;
3120 +
3121 +               if (++up->cur_rec_descr == SERIAL_RECV_DESCRIPTORS)
3122 +                       up->cur_rec_descr = 0;
3123 +
3124 +               /* Find out how many bytes were read. */
3125 +               recvl = descr->after - descr->buf;
3126 +
3127 +               /* Update stats. */
3128 +               up->port.icount.rx += recvl;
3129 +
3130 +               ret += handle_descr_data(up, descr, recvl);
3131 +               descr->eol = 1;
3132 +               /*
3133 +                * Make sure GCC doesn't move this eol clear before the
3134 +                * eol set above.
3135 +                */
3136 +               barrier();
3137 +               prev_descr->eol = 0;
3138 +               flush_dma_descr(descr,1); // Cache bug workaround
3139 +               flush_dma_descr(prev_descr,0); // Cache bug workaround
3140 +       }
3141 +
3142 +       return ret;
3143 +}
3144 +
3145 +static void
3146 +receive_chars_dma(struct uart_cris_port *up)
3147 +{
3148 +       reg_ser_r_stat_din rstat;
3149 +       reg_dma_rw_ack_intr ack_intr = {0};
3150 +
3151 +       /* Acknowledge both dma_descr and dma_eop irq. */
3152 +       ack_intr.data = 1;
3153 +       ack_intr.in_eop = 1;
3154 +       REG_WR(dma, up->regi_dmain, rw_ack_intr, ack_intr);
3155 +
3156 +       handle_all_descr_data(up);
3157 +
3158 +       /* Read the status register to detect errors. */
3159 +       rstat = REG_RD(ser, up->regi_ser, r_stat_din);
3160 +
3161 +       if (rstat.framing_err | rstat.par_err | rstat.orun) {
3162 +               /*
3163 +                * If we got an error, we must reset it by reading the
3164 +                * rs_stat_din register and put the data in buffer manually.
3165 +                */
3166 +               reg_ser_rs_stat_din stat_din;
3167 +                stat_din = REG_RD(ser, up->regi_ser, rs_stat_din);
3168 +
3169 +               if (stat_din.par_err)
3170 +                       add_char_and_flag(up, stat_din.data, TTY_PARITY);
3171 +               else if (stat_din.orun)
3172 +                       add_char_and_flag(up, stat_din.data, TTY_OVERRUN);
3173 +               else if (stat_din.framing_err)
3174 +                       add_char_and_flag(up, stat_din.data, TTY_FRAME);
3175 +       }
3176 +
3177 +       /* Restart the receiving DMA, in case it got stuck on an EOL. */
3178 +       DMA_CONTINUE_DATA(up->regi_dmain);
3179 +}
3180 +
3181 +void receive_chars_no_dma(struct uart_cris_port *up)
3182 +{
3183 +       reg_ser_rs_stat_din stat_din;
3184 +       reg_ser_r_stat_din rstat;
3185 +       struct tty_struct *tty;
3186 +       struct uart_icount *icount;
3187 +       int max_count = 16;
3188 +       char flag;
3189 +       reg_ser_rw_ack_intr ack_intr = { 0 };
3190 +
3191 +       rstat = REG_RD(ser, up->regi_ser, r_stat_din);
3192 +       up->last_rx_active_usec = GET_JIFFIES_USEC();
3193 +       up->last_rx_active = jiffies;
3194 +       icount = &up->port.icount;
3195 +       tty = up->port.info->tty;
3196 +
3197 +       do {
3198 +               stat_din = REG_RD(ser, up->regi_ser, rs_stat_din);
3199 +
3200 +               flag = TTY_NORMAL;
3201 +               ack_intr.dav = 1;
3202 +               REG_WR(ser, up->regi_ser, rw_ack_intr, ack_intr);
3203 +               icount->rx++;
3204 +
3205 +               if (stat_din.framing_err | stat_din.par_err | stat_din.orun) {
3206 +                       if (stat_din.data == 0x00 &&
3207 +                            stat_din.framing_err) {
3208 +                               /* Most likely a break. */
3209 +                               flag = TTY_BREAK;
3210 +                               icount->brk++;
3211 +                       } else if (stat_din.par_err) {
3212 +                               flag = TTY_PARITY;
3213 +                               icount->parity++;
3214 +                       } else if (stat_din.orun) {
3215 +                               flag = TTY_OVERRUN;
3216 +                               icount->overrun++;
3217 +                       } else if (stat_din.framing_err) {
3218 +                               flag = TTY_FRAME;
3219 +                               icount->frame++;
3220 +                       }
3221 +               }
3222 +
3223 +               /*
3224 +                * If this becomes important, we probably *could* handle this
3225 +                * gracefully by keeping track of the unhandled character.
3226 +                */
3227 +                if (!tty_insert_flip_char(tty, stat_din.data, flag))
3228 +                       panic("%s: No tty buffer space", __FUNCTION__);
3229 +               rstat = REG_RD(ser, up->regi_ser, r_stat_din);
3230 +       } while (rstat.dav && (max_count-- > 0));
3231 +       spin_unlock(&up->port.lock);
3232 +       tty_flip_buffer_push(tty);
3233 +       spin_lock(&up->port.lock);
3234 +} /* receive_chars_no_dma */
3235 +
3236 +/*
3237 + * DMA output channel interrupt handler.
3238 + * this interrupt is called from DMA2(ser2), DMA8(ser3), DMA6(ser0) or
3239 + * DMA4(ser1) when they have finished a descriptor with the intr flag set.
3240 + */
3241 +
3242 +static irqreturn_t
3243 +dma_tr_interrupt(int irq, void *dev_id, struct pt_regs * regs)
3244 +{
3245 +       struct uart_cris_port *up = (struct uart_cris_port *)dev_id;
3246 +       reg_dma_r_masked_intr masked_intr;
3247 +       reg_scope_instances regi_dmaout;
3248 +       int handled = 0;
3249 +
3250 +       spin_lock(&up->port.lock);
3251 +       regi_dmaout = up->regi_dmaout;
3252 +       if (!regi_dmaout) {
3253 +               spin_unlock(&up->port.lock);
3254 +               return IRQ_NONE;
3255 +       }
3256 +
3257 +       /*
3258 +        * Check for dma_descr (don't need to check for dma_eop in
3259 +        * output DMA for serial).
3260 +        */
3261 +       masked_intr = REG_RD(dma, regi_dmaout, r_masked_intr);
3262 +
3263 +       if (masked_intr.data) {
3264 +               /* We can send a new dma bunch. make it so. */
3265 +
3266 +               /*
3267 +                * Read jiffies_usec first.
3268 +                * We want this time to be as late as possible.
3269 +                */
3270 +               up->last_tx_active_usec = GET_JIFFIES_USEC();
3271 +               up->last_tx_active = jiffies;
3272 +               transmit_chars_dma(up);
3273 +               handled = 1;
3274 +       }
3275 +       check_modem_status(up);
3276 +       spin_unlock(&up->port.lock);
3277 +       return IRQ_RETVAL(handled);
3278 +}
3279 +
3280 +/* DMA input channel interrupt handler. */
3281 +
3282 +static irqreturn_t
3283 +dma_rec_interrupt(int irq, void *dev_id, struct pt_regs * regs)
3284 +{
3285 +       struct uart_cris_port *up = (struct uart_cris_port *)dev_id;
3286 +       reg_dma_r_masked_intr masked_intr;
3287 +       reg_scope_instances regi_dmain;
3288 +       int handled = 0;
3289 +
3290 +       spin_lock(&up->port.lock);
3291 +       regi_dmain = up->regi_dmain;
3292 +       if (!regi_dmain) {
3293 +               spin_unlock(&up->port.lock);
3294 +               return IRQ_NONE;
3295 +       }
3296 +
3297 +       /* Check for both dma_eop and dma_descr for the input dma channel. */
3298 +       masked_intr = REG_RD(dma, regi_dmain, r_masked_intr);
3299 +       if (masked_intr.data || masked_intr.in_eop) {
3300 +               /* We have received something. */
3301 +               receive_chars_dma(up);
3302 +               handled = 1;
3303 +       }
3304 +       check_modem_status(up);
3305 +       spin_unlock(&up->port.lock);
3306 +       return IRQ_RETVAL(handled);
3307 +}
3308 +
3309 +/* "Normal" serial port interrupt handler - both rx and tx. */
3310 +
3311 +static irqreturn_t
3312 +ser_interrupt(int irq, void *dev_id, struct pt_regs *regs)
3313 +{
3314 +       struct uart_cris_port *up = (struct uart_cris_port *)dev_id;
3315 +       reg_scope_instances regi_ser;
3316 +       int handled = 0;
3317 +
3318 +       spin_lock(&up->port.lock);
3319 +       if (up->regi_dmain && up->regi_dmaout) {
3320 +               spin_unlock(&up->port.lock);
3321 +               return IRQ_NONE;
3322 +       }
3323 +
3324 +       regi_ser = up->regi_ser;
3325 +
3326 +       if (regi_ser) {
3327 +               reg_ser_r_masked_intr masked_intr;
3328 +               masked_intr = REG_RD(ser, regi_ser, r_masked_intr);
3329 +               /*
3330 +                * Check what interrupts are active before taking
3331 +                * actions. If DMA is used the interrupt shouldn't
3332 +                * be enabled.
3333 +                */
3334 +               if (masked_intr.dav) {
3335 +                       receive_chars_no_dma(up);
3336 +                       handled = 1;
3337 +               }
3338 +               check_modem_status(up);
3339 +
3340 +               if (masked_intr.tr_rdy) {
3341 +                       transmit_chars_no_dma(up);
3342 +                       handled = 1;
3343 +               }
3344 +       }
3345 +       spin_unlock(&up->port.lock);
3346 +       return IRQ_RETVAL(handled);
3347 +} /* ser_interrupt */
3348 +
3349 +static int start_recv_dma(struct uart_cris_port *up)
3350 +{
3351 +       struct dma_descr_data *descr = up->rec_descr;
3352 +       struct etrax_recv_buffer *buffer;
3353 +       int i;
3354 +
3355 +       /* Set up the receiving descriptors. */
3356 +       for (i = 0; i < SERIAL_RECV_DESCRIPTORS; i++) {
3357 +               buffer = alloc_recv_buffer(SERIAL_DESCR_BUF_SIZE);
3358 +               descr[i].next = (void*)virt_to_phys(&descr[i+1]);
3359 +               descr[i].buf = (void*)virt_to_phys(buffer->buffer);
3360 +               descr[i].after = descr[i].buf + SERIAL_DESCR_BUF_SIZE;
3361 +               descr[i].eol = 0;
3362 +               descr[i].out_eop = 0;
3363 +               descr[i].intr = 1;
3364 +               descr[i].wait = 0;
3365 +               descr[i].in_eop = 0;
3366 +               descr[i].md = 0;
3367 +
3368 +       }
3369 +
3370 +       /* Link the last descriptor to the first. */
3371 +       descr[i-1].next = (void*)virt_to_phys(&descr[0]);
3372 +
3373 +       /* And mark it as end of list. */
3374 +       descr[i-1].eol = 1;
3375 +
3376 +       /* Start with the first descriptor in the list. */
3377 +       up->cur_rec_descr = 0;
3378 +       up->rec_context_descr.next = 0;
3379 +       up->rec_context_descr.saved_data
3380 +               = (dma_descr_data *)virt_to_phys(&descr[up->cur_rec_descr]);
3381 +       up->rec_context_descr.saved_data_buf = descr[up->cur_rec_descr].buf;
3382 +
3383 +       /* Start the DMA. */
3384 +       DMA_START_CONTEXT(up->regi_dmain,
3385 +                         virt_to_phys(&up->rec_context_descr));
3386 +
3387 +       /* Input DMA should be running now. */
3388 +       return 1;
3389 +}
3390 +
3391 +
3392 +static void start_receive(struct uart_cris_port *up)
3393 +{
3394 +       reg_scope_instances regi_dmain = up->regi_dmain;
3395 +       if (regi_dmain) {
3396 +               start_recv_dma(up);
3397 +       }
3398 +}
3399 +
3400 +
3401 +static void start_transmitter(struct uart_cris_port *up)
3402 +{
3403 +       int i;
3404 +       reg_scope_instances regi_dmaout = up->regi_dmaout;
3405 +       if (regi_dmaout) {
3406 +               for (i = 0; i < SERIAL_TX_DESCRIPTORS; i++) {
3407 +                       memset(&up->tr_descr[i], 0, sizeof(up->tr_descr[i]));
3408 +                       up->tr_descr[i].eol = 1;
3409 +                       up->tr_descr[i].intr = 1;
3410 +                       up->tr_descr[i].next = (dma_descr_data *)
3411 +                               virt_to_phys(&up->tr_descr[i+1]);
3412 +               }
3413 +               up->tr_descr[i-1].next = (dma_descr_data *)
3414 +                       virt_to_phys(&up->tr_descr[0]);
3415 +               up->first_tx_descr = &up->tr_descr[0];
3416 +
3417 +               /*
3418 +                * We'll be counting up to up->last_tx_descr->next from
3419 +                * up->first_tx_descr when starting DMA, so we should make
3420 +                * them the same for the very first round.  If instead we'd
3421 +                * set last_tx_descr = first_tx_descr, we'd rely on
3422 +                * accidentally working code and data as we'd take a pass over
3423 +                * the first, unused, descriptor.
3424 +                */
3425 +               up->last_tx_descr = &up->tr_descr[i-1];
3426 +               up->tx_started = 0;
3427 +               up->tx_pending_chars = 0;
3428 +       }
3429 +}
3430 +
3431 +static int serial_cris_startup(struct uart_port *port)
3432 +{
3433 +       struct uart_cris_port *up = (struct uart_cris_port *)port;
3434 +       unsigned long flags;
3435 +       reg_intr_vect_rw_mask intr_mask;
3436 +       reg_ser_rw_intr_mask ser_intr_mask = {0};
3437 +       reg_dma_rw_intr_mask dmain_intr_mask = {0};
3438 +       reg_dma_rw_intr_mask dmaout_intr_mask = {0};
3439 +       reg_dma_rw_cfg cfg = {.en = 1};
3440 +       reg_scope_instances regi_dma;
3441 +
3442 +       spin_lock_irqsave(&up->port.lock, flags);
3443 +
3444 +       intr_mask = REG_RD(intr_vect, regi_irq, rw_mask);
3445 +
3446 +       dmain_intr_mask.data = dmain_intr_mask.in_eop = regk_dma_yes;
3447 +       dmaout_intr_mask.data = regk_dma_yes;
3448 +       if (!up->regi_dmain)
3449 +               ser_intr_mask.dav = regk_ser_yes;
3450 +
3451 +       if (port->line == 0) {
3452 +               if (request_irq(SER0_INTR_VECT, ser_interrupt,
3453 +                               IRQF_SHARED | IRQF_DISABLED, "ser0",
3454 +                               &serial_cris_ports[0]))
3455 +                       panic("irq ser0");
3456 +               /* Enable the ser0 irq in global config. */
3457 +               intr_mask.ser0 = 1;
3458 +               /* Port ser0 can use dma6 for tx and dma7 for rx. */
3459 +#ifdef CONFIG_ETRAX_SERIAL_PORT0_DMA6_OUT
3460 +               if (request_irq(DMA6_INTR_VECT, dma_tr_interrupt,
3461 +                               IRQF_DISABLED, "serial 0 dma tr",
3462 +                               &serial_cris_ports[0]))
3463 +                       panic("irq ser0txdma");
3464 +               crisv32_request_dma(6, "ser0", DMA_PANIC_ON_ERROR, 0,
3465 +                                   dma_ser0);
3466 +               /* Enable the dma6 irq in global config. */
3467 +               intr_mask.dma6 = 1;
3468 +#endif
3469 +#ifdef CONFIG_ETRAX_SERIAL_PORT0_DMA7_IN
3470 +               if (request_irq(DMA7_INTR_VECT, dma_rec_interrupt,
3471 +                               IRQF_DISABLED, "serial 0 dma rec",
3472 +                               &serial_cris_ports[0]))
3473 +                       panic("irq ser0rxdma");
3474 +               crisv32_request_dma(7, "ser0", DMA_PANIC_ON_ERROR, 0,
3475 +                                   dma_ser0);
3476 +               /* Enable the dma7 irq in global config. */
3477 +               intr_mask.dma7 = 1;
3478 +#endif
3479 +       } else if (port->line == 1) {
3480 +               if (request_irq(SER1_INTR_VECT, ser_interrupt,
3481 +                               IRQF_SHARED | IRQF_DISABLED, "ser1",
3482 +                               &serial_cris_ports[1]))
3483 +                       panic("irq ser1");
3484 +               /* Enable the ser1 irq in global config. */
3485 +               intr_mask.ser1 = 1;
3486 +
3487 +               /* Port ser1 can use dma4 for tx and dma5 for rx. */
3488 +#ifdef CONFIG_ETRAX_SERIAL_PORT1_DMA4_OUT
3489 +               if (request_irq(DMA4_INTR_VECT, dma_tr_interrupt,
3490 +                               IRQF_DISABLED, "serial 1 dma tr",
3491 +                               &serial_cris_ports[1]))
3492 +                       panic("irq ser1txdma");
3493 +               crisv32_request_dma(4, "ser1", DMA_PANIC_ON_ERROR, 0,
3494 +                                   dma_ser1);
3495 +               /* Enable the dma4 irq in global config. */
3496 +               intr_mask.dma4 = 1;
3497 +#endif
3498 +#ifdef CONFIG_ETRAX_SERIAL_PORT1_DMA5_IN
3499 +               if (request_irq(DMA5_INTR_VECT, dma_rec_interrupt,
3500 +                               IRQF_DISABLED, "serial 1 dma rec",
3501 +                               &serial_cris_ports[1]))
3502 +                       panic("irq ser1rxdma");
3503 +               crisv32_request_dma(5, "ser1", DMA_PANIC_ON_ERROR, 0,
3504 +                                   dma_ser1);
3505 +               /* Enable the dma5 irq in global config. */
3506 +               intr_mask.dma5 = 1;
3507 +#endif
3508 +       } else if (port->line == 2) {
3509 +               if (request_irq(SER2_INTR_VECT, ser_interrupt,
3510 +                               IRQF_SHARED | IRQF_DISABLED, "ser2",
3511 +                               &serial_cris_ports[2]))
3512 +                       panic("irq ser2");
3513 +               /* Enable the ser2 irq in global config. */
3514 +               intr_mask.ser2 = 1;
3515 +
3516 +               /* Port ser2 can use dma2 for tx and dma3 for rx. */
3517 +#ifdef CONFIG_ETRAX_SERIAL_PORT2_DMA2_OUT
3518 +               if (request_irq(DMA2_INTR_VECT, dma_tr_interrupt,
3519 +                               IRQF_DISABLED, "serial 2 dma tr",
3520 +                               &serial_cris_ports[2]))
3521 +                       panic("irq ser2txdma");
3522 +               crisv32_request_dma(2, "ser2", DMA_PANIC_ON_ERROR, 0,
3523 +                                   dma_ser2);
3524 +               /* Enable the dma2 irq in global config. */
3525 +               intr_mask.dma2 = 1;
3526 +#endif
3527 +#ifdef CONFIG_ETRAX_SERIAL_PORT2_DMA3_IN
3528 +               if (request_irq(DMA3_INTR_VECT, dma_rec_interrupt,
3529 +                               IRQF_DISABLED, "serial 2 dma rec",
3530 +                               &serial_cris_ports[2]))
3531 +                       panic("irq ser2rxdma");
3532 +               crisv32_request_dma(3, "ser2", DMA_PANIC_ON_ERROR, 0,
3533 +                                   dma_ser2);
3534 +               /* Enable the dma3 irq in global config. */
3535 +               intr_mask.dma3 = 1;
3536 +#endif
3537 +       } else if (port->line == 3) {
3538 +               if (request_irq(SER3_INTR_VECT, ser_interrupt,
3539 +                               IRQF_SHARED | IRQF_DISABLED, "ser3",
3540 +                               &serial_cris_ports[3]))
3541 +                       panic("irq ser3" );
3542 +               /* Enable the ser3 irq in global config. */
3543 +               intr_mask.ser3 = 1;
3544 +
3545 +               /* Port ser3 can use dma8 for tx and dma9 for rx. */
3546 +#ifdef CONFIG_ETRAX_SERIAL_PORT3_DMA8_OUT
3547 +               if (request_irq(DMA8_INTR_VECT, dma_tr_interrupt,
3548 +                               IRQF_DISABLED, "serial 3 dma tr",
3549 +                               &serial_cris_ports[3]))
3550 +                       panic("irq ser3txdma");
3551 +               crisv32_request_dma(8, "ser3", DMA_PANIC_ON_ERROR, 0,
3552 +                                   dma_ser3);
3553 +               /* Enable the dma8 irq in global config. */
3554 +               intr_mask.dma8 = 1;
3555 +#endif
3556 +#ifdef CONFIG_ETRAX_SERIAL_PORT3_DMA9_IN
3557 +               if (request_irq(DMA9_INTR_VECT, dma_rec_interrupt,
3558 +                               IRQF_DISABLED, "serial 3 dma rec",
3559 +                               &serial_cris_ports[3]))
3560 +                       panic("irq ser3rxdma");
3561 +               crisv32_request_dma(9, "ser3", DMA_PANIC_ON_ERROR, 0,
3562 +                                   dma_ser3);
3563 +               /* Enable the dma9 irq in global config. */
3564 +               intr_mask.dma9 = 1;
3565 +#endif
3566 +       }
3567 +
3568 +       /*
3569 +        * Reset the DMA channels and make sure their interrupts are cleared.
3570 +        */
3571 +
3572 +       regi_dma = up->regi_dmain;
3573 +       if (regi_dma) {
3574 +               reg_dma_rw_ack_intr ack_intr = { 0 };
3575 +               DMA_RESET(regi_dma);
3576 +               /* Wait until reset cycle is complete. */
3577 +               DMA_WAIT_UNTIL_RESET(regi_dma);
3578 +               REG_WR(dma, regi_dma, rw_cfg, cfg);
3579 +               /* Make sure the irqs are cleared. */
3580 +               ack_intr.group = 1;
3581 +               ack_intr.ctxt = 1;
3582 +               ack_intr.data = 1;
3583 +               ack_intr.in_eop = 1;
3584 +               ack_intr.stream_cmd = 1;
3585 +               REG_WR(dma, regi_dma, rw_ack_intr, ack_intr);
3586 +       }
3587 +       regi_dma = up->regi_dmaout;
3588 +       if (regi_dma) {
3589 +               reg_dma_rw_ack_intr ack_intr = { 0 };
3590 +               DMA_RESET(regi_dma);
3591 +               /* Wait until reset cycle is complete. */
3592 +               DMA_WAIT_UNTIL_RESET(regi_dma);
3593 +               REG_WR(dma, regi_dma, rw_cfg, cfg);
3594 +               /* Make sure the irqs are cleared. */
3595 +               ack_intr.group = 1;
3596 +               ack_intr.ctxt = 1;
3597 +               ack_intr.data = 1;
3598 +               ack_intr.in_eop = 1;
3599 +               ack_intr.stream_cmd = 1;
3600 +               REG_WR(dma, regi_dma, rw_ack_intr, ack_intr);
3601 +       }
3602 +
3603 +       REG_WR(intr_vect, regi_irq, rw_mask, intr_mask);
3604 +       REG_WR(ser, up->regi_ser, rw_intr_mask, ser_intr_mask);
3605 +       if (up->regi_dmain)
3606 +               REG_WR(dma, up->regi_dmain, rw_intr_mask, dmain_intr_mask);
3607 +       if (up->regi_dmaout)
3608 +               REG_WR(dma, up->regi_dmaout, rw_intr_mask, dmaout_intr_mask);
3609 +
3610 +       start_receive(up);
3611 +       start_transmitter(up);
3612 +
3613 +       serial_cris_set_mctrl(&up->port, up->port.mctrl);
3614 +       spin_unlock_irqrestore(&up->port.lock, flags);
3615 +
3616 +       return 0;
3617 +}
3618 +
3619 +static void serial_cris_shutdown(struct uart_port *port)
3620 +{
3621 +       struct uart_cris_port *up = (struct uart_cris_port *)port;
3622 +       unsigned long flags;
3623 +       reg_intr_vect_rw_mask intr_mask;
3624 +
3625 +       spin_lock_irqsave(&up->port.lock, flags);
3626 +
3627 +       intr_mask = REG_RD(intr_vect, regi_irq, rw_mask);
3628 +       serial_cris_stop_tx(port);
3629 +       serial_cris_stop_rx(port);
3630 +
3631 +       if (port->line == 0) {
3632 +               intr_mask.ser0 = 0;
3633 +               free_irq(SER0_INTR_VECT, &serial_cris_ports[0]);
3634 +#ifdef CONFIG_ETRAX_SERIAL_PORT0_DMA6_OUT
3635 +               intr_mask.dma6 = 0;
3636 +               crisv32_free_dma(6);
3637 +               free_irq(DMA6_INTR_VECT, &serial_cris_ports[0]);
3638 +#endif
3639 +#ifdef CONFIG_ETRAX_SERIAL_PORT0_DMA7_IN
3640 +               intr_mask.dma7 = 0;
3641 +               crisv32_free_dma(7);
3642 +               free_irq(DMA7_INTR_VECT, &serial_cris_ports[0]);
3643 +#endif
3644 +       } else if (port->line == 1) {
3645 +               intr_mask.ser1 = 0;
3646 +               free_irq(SER1_INTR_VECT, &serial_cris_ports[1]);
3647 +#ifdef CONFIG_ETRAX_SERIAL_PORT1_DMA4_OUT
3648 +               intr_mask.dma4 = 0;
3649 +               crisv32_free_dma(4);
3650 +               free_irq(DMA4_INTR_VECT, &serial_cris_ports[1]);
3651 +#endif
3652 +#ifdef CONFIG_ETRAX_SERIAL_PORT1_DMA5_IN
3653 +               intr_mask.dma5 = 0;
3654 +               crisv32_free_dma(5);
3655 +               free_irq(DMA5_INTR_VECT, &serial_cris_ports[1]);
3656 +#endif
3657 +       } else if (port->line == 2) {
3658 +               intr_mask.ser2 = 0;
3659 +               free_irq(SER2_INTR_VECT, &serial_cris_ports[2]);
3660 +#ifdef CONFIG_ETRAX_SERIAL_PORT2_DMA2_OUT
3661 +               intr_mask.dma2 = 0;
3662 +               crisv32_free_dma(2);
3663 +               free_irq(DMA2_INTR_VECT, &serial_cris_ports[2]);
3664 +#endif
3665 +#ifdef CONFIG_ETRAX_SERIAL_PORT2_DMA3_IN
3666 +               intr_mask.dma3 = 0;
3667 +               crisv32_free_dma(3);
3668 +               free_irq(DMA3_INTR_VECT, &serial_cris_ports[2]);
3669 +#endif
3670 +       } else if (port->line == 3) {
3671 +               intr_mask.ser3 = 0;
3672 +               free_irq(SER3_INTR_VECT, &serial_cris_ports[3]);
3673 +#ifdef CONFIG_ETRAX_SERIAL_PORT3_DMA8_OUT
3674 +               intr_mask.dma8 = 0;
3675 +               crisv32_free_dma(8);
3676 +               free_irq(DMA8_INTR_VECT, &serial_cris_ports[3]);
3677 +#endif
3678 +#ifdef CONFIG_ETRAX_SERIAL_PORT3_DMA9_IN
3679 +               intr_mask.dma9 = 0;
3680 +               crisv32_free_dma(9);
3681 +               free_irq(DMA9_INTR_VECT, &serial_cris_ports[3]);
3682 +#endif
3683 +       }
3684 +
3685 +       REG_WR(intr_vect, regi_irq, rw_mask, intr_mask);
3686 +
3687 +       serial_cris_set_mctrl(&up->port, up->port.mctrl);
3688 +
3689 +       if (up->regi_dmain) {
3690 +               struct etrax_recv_buffer *rb;
3691 +               struct etrax_recv_buffer *rb_next;
3692 +               int i;
3693 +               struct dma_descr_data *descr;
3694 +
3695 +               /*
3696 +                * In case of DMA and receive errors, there might be pending
3697 +                * receive buffers still linked here and not flushed upwards.
3698 +                * Release them.
3699 +                */
3700 +               for (rb = up->first_recv_buffer; rb != NULL; rb = rb_next) {
3701 +                       rb_next = rb->next;
3702 +                       kfree (rb);
3703 +               }
3704 +               up->first_recv_buffer = NULL;
3705 +               up->last_recv_buffer = NULL;
3706 +
3707 +               /*
3708 +                * Also release buffers that were attached to the DMA
3709 +                * before we shut down the hardware above.
3710 +                */
3711 +               for (i = 0, descr = up->rec_descr;
3712 +                    i < SERIAL_RECV_DESCRIPTORS;
3713 +                    i++)
3714 +                       if (descr[i].buf) {
3715 +                               rb = phys_to_virt((u32) descr[i].buf)
3716 +                                 - sizeof *rb;
3717 +                               kfree(rb);
3718 +                               descr[i].buf = NULL;
3719 +                       }
3720 +       }
3721 +
3722 +       spin_unlock_irqrestore(&up->port.lock, flags);
3723 +
3724 +}
3725 +
3726 +static void
3727 +serial_cris_set_termios(struct uart_port *port, struct termios *termios,
3728 +                       struct termios *old)
3729 +{
3730 +       struct uart_cris_port *up = (struct uart_cris_port *)port;
3731 +       unsigned long flags;
3732 +       reg_ser_rw_xoff xoff;
3733 +       reg_ser_rw_xoff_clr xoff_clr = {0};
3734 +       reg_ser_rw_tr_ctrl tx_ctrl = {0};
3735 +       reg_ser_rw_tr_dma_en tx_dma_en = {0};
3736 +       reg_ser_rw_rec_ctrl rx_ctrl = {0};
3737 +       reg_ser_rw_tr_baud_div tx_baud_div = {0};
3738 +       reg_ser_rw_rec_baud_div rx_baud_div = {0};
3739 +       reg_ser_r_stat_din rstat;
3740 +        int baud;
3741 +
3742 +       if (old &&
3743 +            termios->c_cflag == old->c_cflag &&
3744 +           termios->c_iflag == old->c_iflag)
3745 +               return;
3746 +
3747 +       /* Start with default settings and then fill in changes. */
3748 +
3749 +       /* Tx: 8 bit, no/even parity, 1 stop bit, no cts. */
3750 +       tx_ctrl.base_freq = regk_ser_f29_493;
3751 +       tx_ctrl.en = 0;
3752 +       tx_ctrl.stop = 0;
3753 +#ifdef CONFIG_ETRAX_RS485
3754 +       if (up->rs485.enabled && (up->port_type != TYPE_485FD)) {
3755 +         tx_ctrl.auto_rts = regk_ser_yes;
3756 +       } else
3757 +#endif
3758 +         tx_ctrl.auto_rts = regk_ser_no;
3759 +       tx_ctrl.txd = 1;
3760 +       tx_ctrl.auto_cts = 0;
3761 +       /* Rx: 8 bit, no/even parity. */
3762 +       if (up->regi_dmain) {
3763 +               rx_ctrl.dma_mode = 1;
3764 +               rx_ctrl.auto_eop = 1;
3765 +       }
3766 +       rx_ctrl.dma_err = regk_ser_stop;
3767 +       rx_ctrl.sampling = regk_ser_majority;
3768 +       rx_ctrl.timeout = 1;
3769 +
3770 +#ifdef CONFIG_ETRAX_RS485
3771 +       if (up->rs485.enabled && (up->port_type != TYPE_485FD)) {
3772 +#  ifdef CONFIG_ETRAX_RS485_DISABLE_RECEIVER
3773 +               rx_ctrl.half_duplex = regk_ser_yes;
3774 +#  endif
3775 +               rx_ctrl.rts_n = up->rs485.rts_after_sent ?
3776 +                 regk_ser_active : regk_ser_inactive;
3777 +       } else if (up->port_type == TYPE_485FD) {
3778 +               rx_ctrl.rts_n = regk_ser_active;
3779 +       } else
3780 +#endif
3781 +         rx_ctrl.rts_n = regk_ser_inactive;
3782 +
3783 +       /* Common for tx and rx: 8N1. */
3784 +       tx_ctrl.data_bits = regk_ser_bits8;
3785 +       rx_ctrl.data_bits = regk_ser_bits8;
3786 +       tx_ctrl.par = regk_ser_even;
3787 +       rx_ctrl.par = regk_ser_even;
3788 +       tx_ctrl.par_en = regk_ser_no;
3789 +       rx_ctrl.par_en = regk_ser_no;
3790 +
3791 +       tx_ctrl.stop_bits = regk_ser_bits1;
3792 +
3793 +
3794 +       /* Change baud-rate and write it to the hardware. */
3795 +
3796 +       /* baud_clock = base_freq / (divisor*8)
3797 +        * divisor = base_freq / (baud_clock * 8)
3798 +        * base_freq is either:
3799 +        * off, ext, 29.493MHz, 32.000 MHz, 32.768 MHz or 100 MHz
3800 +        * 29.493MHz is used for standard baudrates
3801 +        */
3802 +
3803 +       /*
3804 +        * For the console port we keep the original baudrate here.  Not very
3805 +        * beautiful.
3806 +        */
3807 +        if ((port != console_port) || old)
3808 +               baud = uart_get_baud_rate(port, termios, old, 0,
3809 +                                         port->uartclk / 8);
3810 +       else
3811 +               baud = console_baud;
3812 +
3813 +       tx_baud_div.div = 29493000 / (8 * baud);
3814 +       /* Rx uses same as tx. */
3815 +       rx_baud_div.div = tx_baud_div.div;
3816 +       rx_ctrl.base_freq = tx_ctrl.base_freq;
3817 +
3818 +       if ((termios->c_cflag & CSIZE) == CS7) {
3819 +               /* Set 7 bit mode. */
3820 +               tx_ctrl.data_bits = regk_ser_bits7;
3821 +               rx_ctrl.data_bits = regk_ser_bits7;
3822 +       }
3823 +
3824 +       if (termios->c_cflag & CSTOPB) {
3825 +               /* Set 2 stop bit mode. */
3826 +               tx_ctrl.stop_bits = regk_ser_bits2;
3827 +       }
3828 +
3829 +       if (termios->c_cflag & PARENB) {
3830 +               /* Enable parity. */
3831 +               tx_ctrl.par_en = regk_ser_yes;
3832 +               rx_ctrl.par_en = regk_ser_yes;
3833 +       }
3834 +
3835 +       if (termios->c_cflag & CMSPAR) {
3836 +               if (termios->c_cflag & PARODD) {
3837 +                       /* Set mark parity if PARODD and CMSPAR. */
3838 +                       tx_ctrl.par = regk_ser_mark;
3839 +                       rx_ctrl.par = regk_ser_mark;
3840 +               } else {
3841 +                       tx_ctrl.par = regk_ser_space;
3842 +                       rx_ctrl.par = regk_ser_space;
3843 +               }
3844 +       } else {
3845 +               if (termios->c_cflag & PARODD) {
3846 +                       /* Set odd parity. */
3847 +                      tx_ctrl.par = regk_ser_odd;
3848 +                      rx_ctrl.par = regk_ser_odd;
3849 +               }
3850 +       }
3851 +
3852 +       if (termios->c_cflag & CRTSCTS) {
3853 +               /* Enable automatic CTS handling. */
3854 +               tx_ctrl.auto_cts = regk_ser_yes;
3855 +       }
3856 +
3857 +       /* Make sure the tx and rx are enabled. */
3858 +       tx_ctrl.en = regk_ser_yes;
3859 +       rx_ctrl.en = regk_ser_yes;
3860 +
3861 +       /*
3862 +        * Wait for tr_idle in case a character is being output, so it won't
3863 +        * be damaged by the changes we do below.  It seems the termios
3864 +        * changes "sometimes" (we can't see e.g. a tcsetattr TCSANOW
3865 +        * parameter here) should take place no matter what state.  However,
3866 +        * in case we should wait, we may have a non-empty transmitter state
3867 +        * as we tell the upper layers that we're all done when we've passed
3868 +        * characters to the hardware, but we don't wait for them being
3869 +        * actually shifted out.
3870 +        */
3871 +       spin_lock_irqsave(&port->lock, flags);
3872 +
3873 +       /*
3874 +        * None of our interrupts re-enable DMA, so it's thankfully ok to
3875 +        * disable it once, outside the loop.
3876 +        */
3877 +       tx_dma_en.en = 0;
3878 +       REG_WR(ser, up->regi_ser, rw_tr_dma_en, tx_dma_en);
3879 +       do {
3880 +               /*
3881 +                * Make sure we have integrity between the read r_stat status
3882 +                * and us writing the registers below, but don't busy-wait
3883 +                * with interrupts off.  We need to keep the port lock though
3884 +                * (if we go SMP), so nobody else writes characters.
3885 +                */
3886 +               local_irq_restore(flags);
3887 +               local_irq_save(flags);
3888 +               rstat = REG_RD(ser, up->regi_ser, r_stat_din);
3889 +       } while (!rstat.tr_idle);
3890 +
3891 +       /* Actually write the control regs (if modified) to the hardware. */
3892 +
3893 +       uart_update_timeout(port, termios->c_cflag, port->uartclk/8);
3894 +       MODIFY_REG(up->regi_ser, rw_rec_baud_div, rx_baud_div);
3895 +       MODIFY_REG(up->regi_ser, rw_rec_ctrl, rx_ctrl);
3896 +
3897 +       MODIFY_REG(up->regi_ser, rw_tr_baud_div, tx_baud_div);
3898 +       MODIFY_REG(up->regi_ser, rw_tr_ctrl, tx_ctrl);
3899 +
3900 +       tx_dma_en.en = up->regi_dmaout != 0;
3901 +       REG_WR(ser, up->regi_ser, rw_tr_dma_en, tx_dma_en);
3902 +
3903 +       xoff = REG_RD(ser, up->regi_ser, rw_xoff);
3904 +
3905 +       if (up->port.info && (up->port.info->tty->termios->c_iflag & IXON)) {
3906 +               xoff.chr = STOP_CHAR(up->port.info->tty);
3907 +               xoff.automatic = regk_ser_yes;
3908 +       } else
3909 +               xoff.automatic = regk_ser_no;
3910 +
3911 +       MODIFY_REG(up->regi_ser, rw_xoff, xoff);
3912 +
3913 +       /*
3914 +        * Make sure we don't start in an automatically shut-off state due to
3915 +        * a previous early exit.
3916 +        */
3917 +       xoff_clr.clr = 1;
3918 +       REG_WR(ser, up->regi_ser, rw_xoff_clr, xoff_clr);
3919 +
3920 +       serial_cris_set_mctrl(&up->port, up->port.mctrl);
3921 +       spin_unlock_irqrestore(&up->port.lock, flags);
3922 +}
3923 +
3924 +static const char *
3925 +serial_cris_type(struct uart_port *port)
3926 +{
3927 +       return "CRISv32";
3928 +}
3929 +
3930 +static void serial_cris_release_port(struct uart_port *port)
3931 +{
3932 +}
3933 +
3934 +static int serial_cris_request_port(struct uart_port *port)
3935 +{
3936 +       return 0;
3937 +}
3938 +
3939 +static void serial_cris_config_port(struct uart_port *port, int flags)
3940 +{
3941 +       struct uart_cris_port *up = (struct uart_cris_port *)port;
3942 +       up->port.type = PORT_CRIS;
3943 +}
3944 +
3945 +#if defined(CONFIG_ETRAX_RS485)
3946 +
3947 +static void cris_set_rs485_mode(struct uart_cris_port* up) {
3948 +       reg_ser_rw_tr_ctrl tr_ctrl;
3949 +       reg_ser_rw_rec_ctrl rec_ctrl;
3950 +       reg_scope_instances regi_ser = up->regi_ser;
3951 +
3952 +       if (up->port_type == TYPE_485FD)
3953 +               /* We do not want to change anything if we are in 485FD mode */
3954 +               return;
3955 +
3956 +       tr_ctrl = REG_RD(ser, regi_ser, rw_tr_ctrl);
3957 +       rec_ctrl = REG_RD(ser, regi_ser, rw_rec_ctrl);
3958 +
3959 +       /* Set port in RS-485 mode */
3960 +       if (up->rs485.enabled) {
3961 +               tr_ctrl.auto_rts = regk_ser_yes;
3962 +               rec_ctrl.rts_n = up->rs485.rts_after_sent ?
3963 +                 regk_ser_active : regk_ser_inactive;
3964 +#ifdef CONFIG_ETRAX_RS485_DISABLE_RECEIVER
3965 +               rec_ctrl.half_duplex = regk_ser_yes;
3966 +#endif
3967 +       }
3968 +       /* Set port to RS-232 mode */
3969 +       else {
3970 +               rec_ctrl.rts_n = regk_ser_inactive;
3971 +               tr_ctrl.auto_rts = regk_ser_no;
3972 +               rec_ctrl.half_duplex = regk_ser_no;
3973 +       }
3974 +
3975 +       REG_WR(ser, regi_ser, rw_tr_ctrl, tr_ctrl);
3976 +       REG_WR(ser, regi_ser, rw_rec_ctrl, rec_ctrl);
3977 +}
3978 +
3979 +/* Enable/disable RS-485 mode on selected port. */
3980 +static int
3981 +cris_enable_rs485(struct uart_cris_port* up, struct rs485_control *r)
3982 +{
3983 +       if (up->port_type == TYPE_485FD)
3984 +       /* Port in 485FD mode cannot change mode */
3985 +               goto out;
3986 +
3987 +       up->rs485.enabled = 0x1 & r->enabled;
3988 +       up->rs485.rts_on_send = 0x01 & r->rts_on_send;
3989 +       up->rs485.rts_after_sent = 0x01 & r->rts_after_sent;
3990 +       up->rs485.delay_rts_before_send = r->delay_rts_before_send;
3991 +       
3992 +       cris_set_rs485_mode(up);
3993 + out:
3994 +       return 0;
3995 +}
3996 +
3997 +
3998 +/* Enable RS485 mode on port and send the data. Port will stay
3999 + * in 485 mode after the data has been sent.
4000 + */
4001 +static int
4002 +cris_write_rs485(struct uart_cris_port* up, const unsigned char *buf,
4003 +                int count)
4004 +{
4005 +       up->rs485.enabled = 1;
4006 +
4007 +       /* Set the port in RS485 mode */
4008 +       cris_set_rs485_mode(up);
4009 +
4010 +       /* Send the data */
4011 +       count = serial_cris_driver.tty_driver->write(up->port.info->tty, buf, count);
4012 +
4013 +       return count;
4014 +}
4015 +
4016 +#endif /* CONFIG_ETRAX_RS485 */
4017 +
4018 +static int serial_cris_ioctl(struct uart_port *port, unsigned int cmd,
4019 +                             unsigned long arg)
4020 +{
4021 +       struct uart_cris_port *up = (struct uart_cris_port *)port;
4022 +
4023 +       switch (cmd) {
4024 +#if defined(CONFIG_ETRAX_RS485)
4025 +       case TIOCSERSETRS485: {
4026 +               struct rs485_control rs485ctrl;
4027 +               if (copy_from_user(&rs485ctrl, (struct rs485_control*) arg,
4028 +                                  sizeof(rs485ctrl)))
4029 +                       return -EFAULT;
4030 +
4031 +               return cris_enable_rs485(up, &rs485ctrl);
4032 +       }
4033 +
4034 +       case TIOCSERWRRS485: {
4035 +               struct rs485_write rs485wr;
4036 +               if (copy_from_user(&rs485wr, (struct rs485_write*)arg,
4037 +                                  sizeof(rs485wr)))
4038 +                       return -EFAULT;
4039 +
4040 +               return cris_write_rs485(up, rs485wr.outc, rs485wr.outc_size);
4041 +       }
4042 +#endif
4043 +       default:
4044 +               return -ENOIOCTLCMD;
4045 +       }
4046 +
4047 +       return 0;
4048 +}
4049 +
4050 +static const struct uart_ops serial_cris_pops = {
4051 +       .tx_empty       = serial_cris_tx_empty,
4052 +       .set_mctrl      = serial_cris_set_mctrl,
4053 +       .get_mctrl      = serial_cris_get_mctrl,
4054 +       .stop_tx        = serial_cris_stop_tx,
4055 +       .start_tx       = serial_cris_start_tx,
4056 +       .send_xchar     = serial_cris_send_xchar,
4057 +       .stop_rx        = serial_cris_stop_rx,
4058 +       .enable_ms      = serial_cris_enable_ms,
4059 +       .break_ctl      = serial_cris_break_ctl,
4060 +       .startup        = serial_cris_startup,
4061 +       .shutdown       = serial_cris_shutdown,
4062 +       .set_termios    = serial_cris_set_termios,
4063 +       .type           = serial_cris_type,
4064 +       .release_port   = serial_cris_release_port,
4065 +       .request_port   = serial_cris_request_port,
4066 +       .config_port    = serial_cris_config_port,
4067 +       .ioctl          = serial_cris_ioctl,
4068 +};
4069 +
4070 +/*
4071 + * It's too easy to break CONFIG_ETRAX_DEBUG_PORT_NULL and the
4072 + * no-config choices by adding and moving code to before a necessary
4073 + * early exit in all functions for the special case of
4074 + * up->regi_ser == 0.  This collection of dummy functions lets us
4075 + * avoid that.  Maybe there should be a generic table of dummy serial
4076 + * functions?
4077 + */
4078 +
4079 +static unsigned int serial_cris_tx_empty_dummy(struct uart_port *port)
4080 +{
4081 +       return TIOCSER_TEMT;
4082 +}
4083 +
4084 +static void serial_cris_set_mctrl_dummy(struct uart_port *port,
4085 +                                       unsigned int mctrl)
4086 +{
4087 +}
4088 +
4089 +static unsigned int serial_cris_get_mctrl_dummy(struct uart_port *port)
4090 +{
4091 +       return 0;
4092 +}
4093 +
4094 +static void serial_cris_stop_tx_dummy(struct uart_port *port)
4095 +{
4096 +}
4097 +
4098 +static void serial_cris_start_tx_dummy(struct uart_port *port)
4099 +{
4100 +       /* Discard outbound characters. */
4101 +       struct uart_cris_port *up = (struct uart_cris_port *)port;
4102 +       struct circ_buf *xmit = &up->port.info->xmit;
4103 +       xmit->tail = xmit->head;
4104 +       uart_write_wakeup(port);
4105 +}
4106 +
4107 +#define serial_cris_stop_rx_dummy serial_cris_stop_tx_dummy
4108 +
4109 +#define serial_cris_enable_ms_dummy serial_cris_stop_tx_dummy
4110 +
4111 +static void serial_cris_break_ctl_dummy(struct uart_port *port,
4112 +                                       int break_state)
4113 +{
4114 +}
4115 +
4116 +static int serial_cris_startup_dummy(struct uart_port *port)
4117 +{
4118 +       return 0;
4119 +}
4120 +
4121 +#define serial_cris_shutdown_dummy serial_cris_stop_tx_dummy
4122 +
4123 +static void
4124 +serial_cris_set_termios_dummy(struct uart_port *port, struct termios *termios,
4125 +                             struct termios *old)
4126 +{
4127 +}
4128 +
4129 +#define serial_cris_release_port_dummy serial_cris_stop_tx_dummy
4130 +#define serial_cris_request_port_dummy serial_cris_startup_dummy
4131 +
4132 +static const struct uart_ops serial_cris_dummy_pops = {
4133 +       /*
4134 +        * We *could* save one or two of those with different
4135 +        * signature by casting and knowledge of the ABI, but it's
4136 +        * just not worth the maintenance headache.
4137 +        * For the ones we don't define here, the default (usually meaning
4138 +        * "unimplemented") makes sense.
4139 +        */
4140 +       .tx_empty       = serial_cris_tx_empty_dummy,
4141 +       .set_mctrl      = serial_cris_set_mctrl_dummy,
4142 +       .get_mctrl      = serial_cris_get_mctrl_dummy,
4143 +       .stop_tx        = serial_cris_stop_tx_dummy,
4144 +       .start_tx       = serial_cris_start_tx_dummy,
4145 +       .stop_rx        = serial_cris_stop_rx_dummy,
4146 +       .enable_ms      = serial_cris_enable_ms_dummy,
4147 +       .break_ctl      = serial_cris_break_ctl_dummy,
4148 +       .startup        = serial_cris_startup_dummy,
4149 +       .shutdown       = serial_cris_shutdown_dummy,
4150 +       .set_termios    = serial_cris_set_termios_dummy,
4151 +
4152 +       /* This one we keep the same. */
4153 +       .type           = serial_cris_type,
4154 +
4155 +       .release_port   = serial_cris_release_port_dummy,
4156 +       .request_port   = serial_cris_request_port_dummy,
4157 +
4158 +       /*
4159 +        * This one we keep the same too, as long as it doesn't do
4160 +        * anything else but to set the type.
4161 +        */
4162 +       .config_port    = serial_cris_config_port,
4163 +};
4164 +
4165 +static void cris_serial_port_init(struct uart_port *port, int line)
4166 +{
4167 +       struct uart_cris_port *up = (struct uart_cris_port *)port;
4168 +       static int first = 1;
4169 +
4170 +       if (up->initialized)
4171 +               return;
4172 +       up->initialized = 1;
4173 +       port->line = line;
4174 +       spin_lock_init(&port->lock);
4175 +       port->ops =
4176 +               up->regi_ser == 0 ? &serial_cris_dummy_pops :
4177 +               &serial_cris_pops;
4178 +       port->irq = up->irq;
4179 +       port->iobase = up->regi_ser ? up->regi_ser : 1;
4180 +       port->uartclk = 29493000;
4181 +
4182 +       /*
4183 +        * We can't fit any more than 255 here (unsigned char), though
4184 +        * actually UART_XMIT_SIZE characters could be pending output (if it
4185 +        * wasn't for the single test in transmit_chars_dma).  At time of this
4186 +        * writing, the definition of "fifosize" is here the amount of
4187 +        * characters that can be pending output after a start_tx call until
4188 +        * tx_empty returns 1: see serial_core.c:uart_wait_until_sent.  This
4189 +        * matters for timeout calculations unfortunately, but keeping larger
4190 +        * amounts at the DMA wouldn't win much so let's just play nice.
4191 +        */
4192 +       port->fifosize = 255;
4193 +       port->flags = UPF_BOOT_AUTOCONF;
4194 +
4195 +#ifdef CONFIG_ETRAX_RS485
4196 +       /* Set sane defaults. */
4197 +       up->rs485.rts_on_send = 0;
4198 +       up->rs485.rts_after_sent = 1;
4199 +       up->rs485.delay_rts_before_send = 0;
4200 +       if (up->port_type > TYPE_232)
4201 +               up->rs485.enabled = 1;
4202 +       else
4203 +               up->rs485.enabled = 0;
4204 +#endif
4205 +
4206 +       if (first) {
4207 +               first = 0;
4208 +#ifdef CONFIG_ETRAX_SERIAL_PORT0
4209 +               SETUP_PINS(0);
4210 +#endif
4211 +#ifdef CONFIG_ETRAX_SERIAL_PORT1
4212 +               SETUP_PINS(1);
4213 +#endif
4214 +#ifdef CONFIG_ETRAX_SERIAL_PORT2
4215 +               SETUP_PINS(2);
4216 +#endif
4217 +#ifdef CONFIG_ETRAX_SERIAL_PORT3
4218 +               SETUP_PINS(3);
4219 +#endif
4220 +       }
4221 +}
4222 +
4223 +static int __init serial_cris_init(void)
4224 +{
4225 +       int ret, i;
4226 +       reg_ser_rw_rec_ctrl rec_ctrl;
4227 +       printk(KERN_INFO "Serial: CRISv32 driver $Revision: 1.78 $ ");
4228 +
4229 +       ret = uart_register_driver(&serial_cris_driver);
4230 +       if (ret)
4231 +               goto out;
4232 +
4233 +       for (i = 0; i < UART_NR; i++) {
4234 +               if (serial_cris_ports[i].used) {
4235 +#ifdef CONFIG_ETRAX_RS485
4236 +                       /* Make sure that the RTS pin stays low when allocating
4237 +                        * pins for a port in 485 mode. 
4238 +                        */
4239 +                       if (serial_cris_ports[i].port_type > TYPE_232) {
4240 +                               rec_ctrl = REG_RD(ser, serial_cris_ports[i].regi_ser, rw_rec_ctrl);
4241 +                               rec_ctrl.rts_n = regk_ser_active;
4242 +                               REG_WR(ser, serial_cris_ports[i].regi_ser, rw_rec_ctrl, rec_ctrl); 
4243 +                       }
4244 +#endif
4245 +                       switch (serial_cris_ports[i].regi_ser) {
4246 +                       case regi_ser1:
4247 +                               if (crisv32_pinmux_alloc_fixed(pinmux_ser1)) {
4248 +                                       printk("Failed to allocate pins for ser1, disable port\n");
4249 +                                       serial_cris_ports[i].used = 0;
4250 +                                       continue;
4251 +                               }
4252 +                               break;
4253 +                       case regi_ser2:
4254 +                               if (crisv32_pinmux_alloc_fixed(pinmux_ser2)) {
4255 +                                       printk("Failed to allocate pins for ser2, disable port\n");
4256 +                                       serial_cris_ports[i].used = 0;
4257 +                                       continue;
4258 +                               }
4259 +                               break;
4260 +                       case regi_ser3:
4261 +                               if (crisv32_pinmux_alloc_fixed(pinmux_ser3)) {
4262 +                                       printk("Failed to allocate pins for ser3, disable port\n");
4263 +                                       serial_cris_ports[i].used = 0;
4264 +                                       continue;
4265 +                               }
4266 +                               break;
4267 +                       }
4268 +
4269 +                       struct uart_port *port = &serial_cris_ports[i].port;
4270 +                       cris_console.index = i;
4271 +                       cris_serial_port_init(port, i);
4272 +                       uart_add_one_port(&serial_cris_driver, port);
4273 +               }
4274 +       }
4275 +
4276 +out:
4277 +       return ret;
4278 +}
4279 +
4280 +static void __exit serial_cris_exit(void)
4281 +{
4282 +       int i;
4283 +       for (i = 0; i < UART_NR; i++)
4284 +               if (serial_cris_ports[i].used) {
4285 +                       switch (serial_cris_ports[i].regi_ser) {
4286 +                       case regi_ser1:
4287 +                               crisv32_pinmux_dealloc_fixed(pinmux_ser1);
4288 +                               break;
4289 +                       case regi_ser2:
4290 +                               crisv32_pinmux_dealloc_fixed(pinmux_ser2);
4291 +                               break;
4292 +                       case regi_ser3:
4293 +                               crisv32_pinmux_dealloc_fixed(pinmux_ser3);
4294 +                               break;
4295 +                       }
4296 +                       uart_remove_one_port(&serial_cris_driver,
4297 +                                &serial_cris_ports[i].port);
4298 +               }
4299 +       uart_unregister_driver(&serial_cris_driver);
4300 +}
4301 +
4302 +module_init(serial_cris_init);
4303 +module_exit(serial_cris_exit);
4304 --- linux-2.6.19.2.orig/drivers/usb/host/hc_crisv10.c   2007-01-10 20:10:37.000000000 +0100
4305 +++ linux-2.6.19.2.dev/drivers/usb/host/hc-crisv10.c    2007-02-26 20:58:29.000000000 +0100
4306 @@ -1,219 +1,51 @@
4307  /*
4308 - * usb-host.c: ETRAX 100LX USB Host Controller Driver (HCD)
4309   *
4310 - * Copyright (c) 2002, 2003 Axis Communications AB.
4311 + * ETRAX 100LX USB Host Controller Driver
4312 + *
4313 + * Copyright (C) 2005, 2006  Axis Communications AB
4314 + *
4315 + * Author: Konrad Eriksson <konrad.eriksson@axis.se>
4316 + *
4317   */
4318  
4319 +#include <linux/module.h>
4320  #include <linux/kernel.h>
4321 -#include <linux/delay.h>
4322 -#include <linux/ioport.h>
4323 -#include <linux/sched.h>
4324 -#include <linux/slab.h>
4325 -#include <linux/errno.h>
4326 -#include <linux/unistd.h>
4327 -#include <linux/interrupt.h>
4328  #include <linux/init.h>
4329 -#include <linux/list.h>
4330 +#include <linux/moduleparam.h>
4331  #include <linux/spinlock.h>
4332 +#include <linux/usb.h>
4333 +#include <linux/platform_device.h>
4334  
4335 -#include <asm/uaccess.h>
4336  #include <asm/io.h>
4337  #include <asm/irq.h>
4338 -#include <asm/dma.h>
4339 -#include <asm/system.h>
4340 -#include <asm/arch/svinto.h>
4341 +#include <asm/arch/dma.h>
4342 +#include <asm/arch/io_interface_mux.h>
4343  
4344 -#include <linux/usb.h>
4345 -/* Ugly include because we don't live with the other host drivers. */
4346 -#include <../drivers/usb/core/hcd.h>
4347 -#include <../drivers/usb/core/usb.h>
4348 -
4349 -#include "hc_crisv10.h"
4350 +#include "../core/hcd.h"
4351 +#include "../core/hub.h"
4352 +#include "hc-crisv10.h"
4353 +#include "hc-cris-dbg.h"
4354 +
4355 +
4356 +/***************************************************************************/
4357 +/***************************************************************************/
4358 +/* Host Controller settings                                                */
4359 +/***************************************************************************/
4360 +/***************************************************************************/
4361 +
4362 +#define VERSION                        "1.00"
4363 +#define COPYRIGHT              "(c) 2005, 2006 Axis Communications AB"
4364 +#define DESCRIPTION             "ETRAX 100LX USB Host Controller"
4365  
4366  #define ETRAX_USB_HC_IRQ USB_HC_IRQ_NBR
4367  #define ETRAX_USB_RX_IRQ USB_DMA_RX_IRQ_NBR
4368  #define ETRAX_USB_TX_IRQ USB_DMA_TX_IRQ_NBR
4369  
4370 -static const char *usb_hcd_version = "$Revision: 1.2 $";
4371 -
4372 -#undef KERN_DEBUG
4373 -#define KERN_DEBUG ""
4374 -
4375 -
4376 -#undef USB_DEBUG_RH
4377 -#undef USB_DEBUG_EPID
4378 -#undef USB_DEBUG_SB
4379 -#undef USB_DEBUG_DESC
4380 -#undef USB_DEBUG_URB
4381 -#undef USB_DEBUG_TRACE
4382 -#undef USB_DEBUG_BULK
4383 -#undef USB_DEBUG_CTRL
4384 -#undef USB_DEBUG_INTR
4385 -#undef USB_DEBUG_ISOC
4386 -
4387 -#ifdef USB_DEBUG_RH
4388 -#define dbg_rh(format, arg...) printk(KERN_DEBUG __FILE__ ": (RH) " format "\n" , ## arg)
4389 -#else
4390 -#define dbg_rh(format, arg...) do {} while (0)
4391 -#endif
4392 -
4393 -#ifdef USB_DEBUG_EPID
4394 -#define dbg_epid(format, arg...) printk(KERN_DEBUG __FILE__ ": (EPID) " format "\n" , ## arg)
4395 -#else
4396 -#define dbg_epid(format, arg...) do {} while (0)
4397 -#endif
4398 -
4399 -#ifdef USB_DEBUG_SB
4400 -#define dbg_sb(format, arg...) printk(KERN_DEBUG __FILE__ ": (SB) " format "\n" , ## arg)
4401 -#else
4402 -#define dbg_sb(format, arg...) do {} while (0)
4403 -#endif
4404 -
4405 -#ifdef USB_DEBUG_CTRL
4406 -#define dbg_ctrl(format, arg...) printk(KERN_DEBUG __FILE__ ": (CTRL) " format "\n" , ## arg)
4407 -#else
4408 -#define dbg_ctrl(format, arg...) do {} while (0)
4409 -#endif
4410 -
4411 -#ifdef USB_DEBUG_BULK
4412 -#define dbg_bulk(format, arg...) printk(KERN_DEBUG __FILE__ ": (BULK) " format "\n" , ## arg)
4413 -#else
4414 -#define dbg_bulk(format, arg...) do {} while (0)
4415 -#endif
4416 -
4417 -#ifdef USB_DEBUG_INTR
4418 -#define dbg_intr(format, arg...) printk(KERN_DEBUG __FILE__ ": (INTR) " format "\n" , ## arg)
4419 -#else
4420 -#define dbg_intr(format, arg...) do {} while (0)
4421 -#endif
4422 -
4423 -#ifdef USB_DEBUG_ISOC
4424 -#define dbg_isoc(format, arg...) printk(KERN_DEBUG __FILE__ ": (ISOC) " format "\n" , ## arg)
4425 -#else
4426 -#define dbg_isoc(format, arg...) do {} while (0)
4427 -#endif
4428 -
4429 -#ifdef USB_DEBUG_TRACE
4430 -#define DBFENTER (printk(": Entering: %s\n", __FUNCTION__))
4431 -#define DBFEXIT  (printk(": Exiting:  %s\n", __FUNCTION__))
4432 -#else
4433 -#define DBFENTER do {} while (0)
4434 -#define DBFEXIT  do {} while (0)
4435 -#endif
4436 -
4437 -#define usb_pipeslow(pipe)     (((pipe) >> 26) & 1)
4438 -
4439 -/*-------------------------------------------------------------------
4440 - Virtual Root Hub
4441 - -------------------------------------------------------------------*/
4442 -
4443 -static __u8 root_hub_dev_des[] =
4444 -{
4445 -       0x12,  /*  __u8  bLength; */
4446 -       0x01,  /*  __u8  bDescriptorType; Device */
4447 -       0x00,  /*  __le16 bcdUSB; v1.0 */
4448 -       0x01,
4449 -       0x09,  /*  __u8  bDeviceClass; HUB_CLASSCODE */
4450 -       0x00,  /*  __u8  bDeviceSubClass; */
4451 -       0x00,  /*  __u8  bDeviceProtocol; */
4452 -       0x08,  /*  __u8  bMaxPacketSize0; 8 Bytes */
4453 -       0x00,  /*  __le16 idVendor; */
4454 -       0x00,
4455 -       0x00,  /*  __le16 idProduct; */
4456 -       0x00,
4457 -       0x00,  /*  __le16 bcdDevice; */
4458 -       0x00,
4459 -       0x00,  /*  __u8  iManufacturer; */
4460 -       0x02,  /*  __u8  iProduct; */
4461 -       0x01,  /*  __u8  iSerialNumber; */
4462 -       0x01   /*  __u8  bNumConfigurations; */
4463 -};
4464 -
4465 -/* Configuration descriptor */
4466 -static __u8 root_hub_config_des[] =
4467 -{
4468 -       0x09,  /*  __u8  bLength; */
4469 -       0x02,  /*  __u8  bDescriptorType; Configuration */
4470 -       0x19,  /*  __le16 wTotalLength; */
4471 -       0x00,
4472 -       0x01,  /*  __u8  bNumInterfaces; */
4473 -       0x01,  /*  __u8  bConfigurationValue; */
4474 -       0x00,  /*  __u8  iConfiguration; */
4475 -       0x40,  /*  __u8  bmAttributes; Bit 7: Bus-powered */
4476 -       0x00,  /*  __u8  MaxPower; */
4477 -
4478 -     /* interface */
4479 -       0x09,  /*  __u8  if_bLength; */
4480 -       0x04,  /*  __u8  if_bDescriptorType; Interface */
4481 -       0x00,  /*  __u8  if_bInterfaceNumber; */
4482 -       0x00,  /*  __u8  if_bAlternateSetting; */
4483 -       0x01,  /*  __u8  if_bNumEndpoints; */
4484 -       0x09,  /*  __u8  if_bInterfaceClass; HUB_CLASSCODE */
4485 -       0x00,  /*  __u8  if_bInterfaceSubClass; */
4486 -       0x00,  /*  __u8  if_bInterfaceProtocol; */
4487 -       0x00,  /*  __u8  if_iInterface; */
4488 -
4489 -     /* endpoint */
4490 -       0x07,  /*  __u8  ep_bLength; */
4491 -       0x05,  /*  __u8  ep_bDescriptorType; Endpoint */
4492 -       0x81,  /*  __u8  ep_bEndpointAddress; IN Endpoint 1 */
4493 -       0x03,  /*  __u8  ep_bmAttributes; Interrupt */
4494 -       0x08,  /*  __le16 ep_wMaxPacketSize; 8 Bytes */
4495 -       0x00,
4496 -       0xff   /*  __u8  ep_bInterval; 255 ms */
4497 -};
4498 -
4499 -static __u8 root_hub_hub_des[] =
4500 -{
4501 -       0x09,  /*  __u8  bLength; */
4502 -       0x29,  /*  __u8  bDescriptorType; Hub-descriptor */
4503 -       0x02,  /*  __u8  bNbrPorts; */
4504 -       0x00,  /* __u16  wHubCharacteristics; */
4505 -       0x00,
4506 -       0x01,  /*  __u8  bPwrOn2pwrGood; 2ms */
4507 -       0x00,  /*  __u8  bHubContrCurrent; 0 mA */
4508 -       0x00,  /*  __u8  DeviceRemovable; *** 7 Ports max *** */
4509 -       0xff   /*  __u8  PortPwrCtrlMask; *** 7 ports max *** */
4510 -};
4511 -
4512 -static DEFINE_TIMER(bulk_start_timer, NULL, 0, 0);
4513 -static DEFINE_TIMER(bulk_eot_timer, NULL, 0, 0);
4514 -
4515 -/* We want the start timer to expire before the eot timer, because the former might start
4516 -   traffic, thus making it unnecessary for the latter to time out. */
4517 -#define BULK_START_TIMER_INTERVAL (HZ/10) /* 100 ms */
4518 -#define BULK_EOT_TIMER_INTERVAL (HZ/10+2) /* 120 ms */
4519 -
4520 -#define OK(x) len = (x); dbg_rh("OK(%d): line: %d", x, __LINE__); break
4521 -#define CHECK_ALIGN(x) if (((__u32)(x)) & 0x00000003) \
4522 -{panic("Alignment check (DWORD) failed at %s:%s:%d\n", __FILE__, __FUNCTION__, __LINE__);}
4523 -
4524 -#define SLAB_FLAG     (in_interrupt() ? SLAB_ATOMIC : SLAB_KERNEL)
4525 -#define KMALLOC_FLAG  (in_interrupt() ? GFP_ATOMIC : GFP_KERNEL)
4526 -
4527 -/* Most helpful debugging aid */
4528 -#define assert(expr) ((void) ((expr) ? 0 : (err("assert failed at line %d",__LINE__))))
4529 -
4530 -/* Alternative assert define which stops after a failed assert. */
4531 -/*
4532 -#define assert(expr)                                      \
4533 -{                                                         \
4534 -        if (!(expr)) {                                    \
4535 -                err("assert failed at line %d",__LINE__); \
4536 -                while (1);                                \
4537 -        }                                                 \
4538 -}
4539 -*/
4540 -
4541 +/* Number of physical ports in Etrax 100LX */
4542 +#define USB_ROOT_HUB_PORTS 2
4543  
4544 -/* FIXME: Should RX_BUF_SIZE be a config option, or maybe we should adjust it dynamically?
4545 -   To adjust it dynamically we would have to get an interrupt when we reach the end
4546 -   of the rx descriptor list, or when we get close to the end, and then allocate more
4547 -   descriptors. */
4548 -
4549 -#define NBR_OF_RX_DESC     512
4550 -#define RX_DESC_BUF_SIZE   1024
4551 -#define RX_BUF_SIZE        (NBR_OF_RX_DESC * RX_DESC_BUF_SIZE)
4552 +const char hc_name[] = "hc-crisv10";
4553 +const char product_desc[] = DESCRIPTION;
4554  
4555  /* The number of epids is, among other things, used for pre-allocating
4556     ctrl, bulk and isoc EP descriptors (one for each epid).
4557 @@ -221,4332 +53,4632 @@
4558  #define NBR_OF_EPIDS       32
4559  
4560  /* Support interrupt traffic intervals up to 128 ms. */
4561 -#define MAX_INTR_INTERVAL 128
4562 +#define MAX_INTR_INTERVAL  128
4563  
4564 -/* If periodic traffic (intr or isoc) is to be used, then one entry in the EP table
4565 -   must be "invalid". By this we mean that we shouldn't care about epid attentions
4566 -   for this epid, or at least handle them differently from epid attentions for "valid"
4567 -   epids. This define determines which one to use (don't change it). */
4568 -#define INVALID_EPID     31
4569 +/* If periodic traffic (intr or isoc) is to be used, then one entry in the EP
4570 +   table must be "invalid". By this we mean that we shouldn't care about epid
4571 +   attentions for this epid, or at least handle them differently from epid
4572 +   attentions for "valid" epids. This define determines which one to use
4573 +   (don't change it). */
4574 +#define INVALID_EPID       31
4575  /* A special epid for the bulk dummys. */
4576 -#define DUMMY_EPID       30
4577 -
4578 -/* This is just a software cache for the valid entries in R_USB_EPT_DATA. */
4579 -static __u32 epid_usage_bitmask;
4580 -
4581 -/* A bitfield to keep information on in/out traffic is needed to uniquely identify
4582 -   an endpoint on a device, since the most significant bit which indicates traffic
4583 -   direction is lacking in the ep_id field (ETRAX epids can handle both in and
4584 -   out traffic on endpoints that are otherwise identical). The USB framework, however,
4585 -   relies on them to be handled separately.  For example, bulk IN and OUT urbs cannot
4586 -   be queued in the same list, since they would block each other. */
4587 -static __u32 epid_out_traffic;
4588 -
4589 -/* DMA IN cache bug. Align the DMA IN buffers to 32 bytes, i.e. a cache line.
4590 -   Since RX_DESC_BUF_SIZE is 1024 is a multiple of 32, all rx buffers will be cache aligned. */
4591 -static volatile unsigned char RxBuf[RX_BUF_SIZE] __attribute__ ((aligned (32)));
4592 -static volatile USB_IN_Desc_t RxDescList[NBR_OF_RX_DESC] __attribute__ ((aligned (4)));
4593 -
4594 -/* Pointers into RxDescList. */
4595 -static volatile USB_IN_Desc_t *myNextRxDesc;
4596 -static volatile USB_IN_Desc_t *myLastRxDesc;
4597 -static volatile USB_IN_Desc_t *myPrevRxDesc;
4598 -
4599 -/* EP descriptors must be 32-bit aligned. */
4600 -static volatile USB_EP_Desc_t TxCtrlEPList[NBR_OF_EPIDS] __attribute__ ((aligned (4)));
4601 -static volatile USB_EP_Desc_t TxBulkEPList[NBR_OF_EPIDS] __attribute__ ((aligned (4)));
4602 -/* After each enabled bulk EP (IN or OUT) we put two disabled EP descriptors with the eol flag set,
4603 -   causing the DMA to stop the DMA channel. The first of these two has the intr flag set, which
4604 -   gives us a dma8_sub0_descr interrupt. When we receive this, we advance the DMA one step in the
4605 -   EP list and then restart the bulk channel, thus forcing a switch between bulk EP descriptors
4606 -   in each frame. */
4607 -static volatile USB_EP_Desc_t TxBulkDummyEPList[NBR_OF_EPIDS][2] __attribute__ ((aligned (4)));
4608 -
4609 -static volatile USB_EP_Desc_t TxIsocEPList[NBR_OF_EPIDS] __attribute__ ((aligned (4)));
4610 -static volatile USB_SB_Desc_t TxIsocSB_zout __attribute__ ((aligned (4)));
4611 -
4612 -static volatile USB_EP_Desc_t TxIntrEPList[MAX_INTR_INTERVAL] __attribute__ ((aligned (4)));
4613 -static volatile USB_SB_Desc_t TxIntrSB_zout __attribute__ ((aligned (4)));
4614 -
4615 -/* A zout transfer makes a memory access at the address of its buf pointer, which means that setting
4616 -   this buf pointer to 0 will cause an access to the flash. In addition to this, setting sw_len to 0
4617 -   results in a 16/32 bytes (depending on DMA burst size) transfer. Instead, we set it to 1, and point
4618 -   it to this buffer. */
4619 -static int zout_buffer[4] __attribute__ ((aligned (4)));
4620 +#define DUMMY_EPID         30
4621  
4622 -/* Cache for allocating new EP and SB descriptors. */
4623 -static kmem_cache_t *usb_desc_cache;
4624 +/* Module settings */
4625  
4626 -/* Cache for the registers allocated in the top half. */
4627 -static kmem_cache_t *top_half_reg_cache;
4628 +MODULE_DESCRIPTION(DESCRIPTION);
4629 +MODULE_LICENSE("GPL");
4630 +MODULE_AUTHOR("Konrad Eriksson <konrad.eriksson@axis.se>");
4631  
4632 -/* Cache for the data allocated in the isoc descr top half. */
4633 -static kmem_cache_t *isoc_compl_cache;
4634  
4635 -static struct usb_bus *etrax_usb_bus;
4636 +/* Module parameters */
4637  
4638 -/* This is a circular (double-linked) list of the active urbs for each epid.
4639 -   The head is never removed, and new urbs are linked onto the list as
4640 -   urb_entry_t elements. Don't reference urb_list directly; use the wrapper
4641 -   functions instead. Note that working with these lists might require spinlock
4642 -   protection. */
4643 -static struct list_head urb_list[NBR_OF_EPIDS];
4644 +/* 0 = No ports enabled
4645 +   1 = Only port 1 enabled (on board ethernet on devboard)
4646 +   2 = Only port 2 enabled (external connector on devboard)
4647 +   3 = Both ports enabled
4648 +*/
4649 +static unsigned int ports = 3;
4650 +module_param(ports, uint, S_IRUGO);
4651 +MODULE_PARM_DESC(ports, "Bitmask indicating USB ports to use");
4652  
4653 -/* Read about the need and usage of this lock in submit_ctrl_urb. */
4654 -static spinlock_t urb_list_lock;
4655  
4656 -/* Used when unlinking asynchronously. */
4657 -static struct list_head urb_unlink_list;
4658 +/***************************************************************************/
4659 +/***************************************************************************/
4660 +/* Shared global variables for this module                                 */
4661 +/***************************************************************************/
4662 +/***************************************************************************/
4663  
4664 -/* for returning string descriptors in UTF-16LE */
4665 -static int ascii2utf (char *ascii, __u8 *utf, int utfmax)
4666 -{
4667 -       int retval;
4668 +/* EP descriptor lists for non period transfers. Must be 32-bit aligned. */
4669 +static volatile struct USB_EP_Desc TxBulkEPList[NBR_OF_EPIDS] __attribute__ ((aligned (4)));
4670  
4671 -       for (retval = 0; *ascii && utfmax > 1; utfmax -= 2, retval += 2) {
4672 -               *utf++ = *ascii++ & 0x7f;
4673 -               *utf++ = 0;
4674 -       }
4675 -       return retval;
4676 -}
4677 +static volatile struct USB_EP_Desc TxCtrlEPList[NBR_OF_EPIDS] __attribute__ ((aligned (4)));
4678  
4679 -static int usb_root_hub_string (int id, int serial, char *type, __u8 *data, int len)
4680 -{
4681 -       char buf [30];
4682 +/* EP descriptor lists for period transfers. Must be 32-bit aligned. */
4683 +static volatile struct USB_EP_Desc TxIntrEPList[MAX_INTR_INTERVAL] __attribute__ ((aligned (4)));
4684 +static volatile struct USB_SB_Desc TxIntrSB_zout __attribute__ ((aligned (4)));
4685  
4686 -       // assert (len > (2 * (sizeof (buf) + 1)));
4687 -       // assert (strlen (type) <= 8);
4688 +static volatile struct USB_EP_Desc TxIsocEPList[NBR_OF_EPIDS] __attribute__ ((aligned (4)));
4689 +static volatile struct USB_SB_Desc TxIsocSB_zout __attribute__ ((aligned (4)));
4690  
4691 -       // language ids
4692 -       if (id == 0) {
4693 -               *data++ = 4; *data++ = 3;       /* 4 bytes data */
4694 -               *data++ = 0; *data++ = 0;       /* some language id */
4695 -               return 4;
4696 -
4697 -       // serial number
4698 -       } else if (id == 1) {
4699 -               sprintf (buf, "%x", serial);
4700 -
4701 -       // product description
4702 -       } else if (id == 2) {
4703 -               sprintf (buf, "USB %s Root Hub", type);
4704 -
4705 -       // id 3 == vendor description
4706 -
4707 -       // unsupported IDs --> "stall"
4708 -       } else
4709 -           return 0;
4710 -
4711 -       data [0] = 2 + ascii2utf (buf, data + 2, len - 2);
4712 -       data [1] = 3;
4713 -       return data [0];
4714 -}
4715 +static volatile struct USB_SB_Desc TxIsocSBList[NBR_OF_EPIDS] __attribute__ ((aligned (4))); 
4716  
4717 -/* Wrappers around the list functions (include/linux/list.h). */
4718 +/* After each enabled bulk EP IN we put two disabled EP descriptors with the eol flag set,
4719 +   causing the DMA to stop the DMA channel. The first of these two has the intr flag set, which
4720 +   gives us a dma8_sub0_descr interrupt. When we receive this, we advance the DMA one step in the
4721 +   EP list and then restart the bulk channel, thus forcing a switch between bulk EP descriptors
4722 +   in each frame. */
4723 +static volatile struct USB_EP_Desc TxBulkDummyEPList[NBR_OF_EPIDS][2] __attribute__ ((aligned (4)));
4724  
4725 -static inline int urb_list_empty(int epid)
4726 +/* List of URB pointers, where each points to the active URB for a epid.
4727 +   For Bulk, Ctrl and Intr this means which URB that currently is added to
4728 +   DMA lists (Isoc URBs are all directly added to DMA lists). As soon as
4729 +   URB has completed is the queue examined and the first URB in queue is
4730 +   removed and moved to the activeUrbList while its state change to STARTED and
4731 +   its transfer(s) gets added to DMA list (exception Isoc where URBs enter
4732 +   state STARTED directly and added transfers added to DMA lists). */
4733 +static struct urb *activeUrbList[NBR_OF_EPIDS];
4734 +
4735 +/* Additional software state info for each epid */
4736 +static struct etrax_epid epid_state[NBR_OF_EPIDS];
4737 +
4738 +/* Timer handles for bulk traffic timer used to avoid DMA bug where DMA stops
4739 +   even if there is new data waiting to be processed */
4740 +static struct timer_list bulk_start_timer = TIMER_INITIALIZER(NULL, 0, 0);
4741 +static struct timer_list bulk_eot_timer = TIMER_INITIALIZER(NULL, 0, 0);
4742 +
4743 +/* We want the start timer to expire before the eot timer, because the former
4744 +   might start traffic, thus making it unnecessary for the latter to time
4745 +   out. */
4746 +#define BULK_START_TIMER_INTERVAL (HZ/50) /* 20 ms */
4747 +#define BULK_EOT_TIMER_INTERVAL (HZ/16) /* 60 ms */
4748 +
4749 +/* Delay before a URB completion happen when it's scheduled to be delayed */
4750 +#define LATER_TIMER_DELAY (HZ/50) /* 20 ms */
4751 +
4752 +/* Simplifying macros for checking software state info of a epid */
4753 +/* ----------------------------------------------------------------------- */
4754 +#define epid_inuse(epid)       epid_state[epid].inuse
4755 +#define epid_out_traffic(epid) epid_state[epid].out_traffic
4756 +#define epid_isoc(epid)   (epid_state[epid].type == PIPE_ISOCHRONOUS ? 1 : 0)
4757 +#define epid_intr(epid)   (epid_state[epid].type == PIPE_INTERRUPT ? 1 : 0)
4758 +
4759 +
4760 +/***************************************************************************/
4761 +/***************************************************************************/
4762 +/* DEBUG FUNCTIONS                                                         */
4763 +/***************************************************************************/
4764 +/***************************************************************************/
4765 +/* Note that these functions are always available in their "__" variants,
4766 +   for use in error situations. The "__" missing variants are controlled by
4767 +   the USB_DEBUG_DESC/USB_DEBUG_URB macros. */
4768 +static void __dump_urb(struct urb* purb)
4769  {
4770 -       return list_empty(&urb_list[epid]);
4771 +  struct crisv10_urb_priv *urb_priv = purb->hcpriv;
4772 +  int urb_num = -1;
4773 +  if(urb_priv) {
4774 +    urb_num = urb_priv->urb_num;
4775 +  }
4776 +  printk("\nURB:0x%x[%d]\n", (unsigned int)purb, urb_num);
4777 +  printk("dev                   :0x%08lx\n", (unsigned long)purb->dev);
4778 +  printk("pipe                  :0x%08x\n", purb->pipe);
4779 +  printk("status                :%d\n", purb->status);
4780 +  printk("transfer_flags        :0x%08x\n", purb->transfer_flags);
4781 +  printk("transfer_buffer       :0x%08lx\n", (unsigned long)purb->transfer_buffer);
4782 +  printk("transfer_buffer_length:%d\n", purb->transfer_buffer_length);
4783 +  printk("actual_length         :%d\n", purb->actual_length);
4784 +  printk("setup_packet          :0x%08lx\n", (unsigned long)purb->setup_packet);
4785 +  printk("start_frame           :%d\n", purb->start_frame);
4786 +  printk("number_of_packets     :%d\n", purb->number_of_packets);
4787 +  printk("interval              :%d\n", purb->interval);
4788 +  printk("error_count           :%d\n", purb->error_count);
4789 +  printk("context               :0x%08lx\n", (unsigned long)purb->context);
4790 +  printk("complete              :0x%08lx\n\n", (unsigned long)purb->complete);
4791 +}
4792 +
4793 +static void __dump_in_desc(volatile struct USB_IN_Desc *in)
4794 +{
4795 +  printk("\nUSB_IN_Desc at 0x%08lx\n", (unsigned long)in);
4796 +  printk("  sw_len  : 0x%04x (%d)\n", in->sw_len, in->sw_len);
4797 +  printk("  command : 0x%04x\n", in->command);
4798 +  printk("  next    : 0x%08lx\n", in->next);
4799 +  printk("  buf     : 0x%08lx\n", in->buf);
4800 +  printk("  hw_len  : 0x%04x (%d)\n", in->hw_len, in->hw_len);
4801 +  printk("  status  : 0x%04x\n\n", in->status);
4802 +}
4803 +
4804 +static void __dump_sb_desc(volatile struct USB_SB_Desc *sb)
4805 +{
4806 +  char tt = (sb->command & 0x30) >> 4;
4807 +  char *tt_string;
4808 +
4809 +  switch (tt) {
4810 +  case 0:
4811 +    tt_string = "zout";
4812 +    break;
4813 +  case 1:
4814 +    tt_string = "in";
4815 +    break;
4816 +  case 2:
4817 +    tt_string = "out";
4818 +    break;
4819 +  case 3:
4820 +    tt_string = "setup";
4821 +    break;
4822 +  default:
4823 +    tt_string = "unknown (weird)";
4824 +  }
4825 +
4826 +  printk(" USB_SB_Desc at 0x%08lx ", (unsigned long)sb);
4827 +  printk(" command:0x%04x (", sb->command);
4828 +  printk("rem:%d ", (sb->command & 0x3f00) >> 8);
4829 +  printk("full:%d ", (sb->command & 0x40) >> 6);
4830 +  printk("tt:%d(%s) ", tt, tt_string);
4831 +  printk("intr:%d ", (sb->command & 0x8) >> 3);
4832 +  printk("eot:%d ", (sb->command & 0x2) >> 1);
4833 +  printk("eol:%d)", sb->command & 0x1);
4834 +  printk(" sw_len:0x%04x(%d)", sb->sw_len, sb->sw_len);
4835 +  printk(" next:0x%08lx", sb->next);
4836 +  printk(" buf:0x%08lx\n", sb->buf);
4837 +}
4838 +
4839 +
4840 +static void __dump_ep_desc(volatile struct USB_EP_Desc *ep)
4841 +{
4842 +  printk("USB_EP_Desc at 0x%08lx ", (unsigned long)ep);
4843 +  printk(" command:0x%04x (", ep->command);
4844 +  printk("ep_id:%d ", (ep->command & 0x1f00) >> 8);
4845 +  printk("enable:%d ", (ep->command & 0x10) >> 4);
4846 +  printk("intr:%d ", (ep->command & 0x8) >> 3);
4847 +  printk("eof:%d ", (ep->command & 0x2) >> 1);
4848 +  printk("eol:%d)", ep->command & 0x1);
4849 +  printk(" hw_len:0x%04x(%d)", ep->hw_len, ep->hw_len);
4850 +  printk(" next:0x%08lx", ep->next);
4851 +  printk(" sub:0x%08lx\n", ep->sub);
4852  }
4853  
4854 -/* Returns first urb for this epid, or NULL if list is empty. */
4855 -static inline struct urb *urb_list_first(int epid)
4856 +static inline void __dump_ep_list(int pipe_type)
4857  {
4858 -       struct urb *first_urb = 0;
4859 +  volatile struct USB_EP_Desc *ep;
4860 +  volatile struct USB_EP_Desc *first_ep;
4861 +  volatile struct USB_SB_Desc *sb;
4862 +
4863 +  switch (pipe_type)
4864 +    {
4865 +    case PIPE_BULK:
4866 +      first_ep = &TxBulkEPList[0];
4867 +      break;
4868 +    case PIPE_CONTROL:
4869 +      first_ep = &TxCtrlEPList[0];
4870 +      break;
4871 +    case PIPE_INTERRUPT:
4872 +      first_ep = &TxIntrEPList[0];
4873 +      break;
4874 +    case PIPE_ISOCHRONOUS:
4875 +      first_ep = &TxIsocEPList[0];
4876 +      break;
4877 +    default:
4878 +      warn("Cannot dump unknown traffic type");
4879 +      return;
4880 +    }
4881 +  ep = first_ep;
4882 +
4883 +  printk("\n\nDumping EP list...\n\n");
4884 +
4885 +  do {
4886 +    __dump_ep_desc(ep);
4887 +    /* Cannot phys_to_virt on 0 as it turns into 80000000, which is != 0. */
4888 +    sb = ep->sub ? phys_to_virt(ep->sub) : 0;
4889 +    while (sb) {
4890 +      __dump_sb_desc(sb);
4891 +      sb = sb->next ? phys_to_virt(sb->next) : 0;
4892 +    }
4893 +    ep = (volatile struct USB_EP_Desc *)(phys_to_virt(ep->next));
4894  
4895 -       if (!urb_list_empty(epid)) {
4896 -               /* Get the first urb (i.e. head->next). */
4897 -               urb_entry_t *urb_entry = list_entry((&urb_list[epid])->next, urb_entry_t, list);
4898 -               first_urb = urb_entry->urb;
4899 -       }
4900 -       return first_urb;
4901 +  } while (ep != first_ep);
4902  }
4903  
4904 -/* Adds an urb_entry last in the list for this epid. */
4905 -static inline void urb_list_add(struct urb *urb, int epid)
4906 +static inline void __dump_ept_data(int epid)
4907  {
4908 -       urb_entry_t *urb_entry = (urb_entry_t *)kmalloc(sizeof(urb_entry_t), KMALLOC_FLAG);
4909 -       assert(urb_entry);
4910 +  unsigned long flags;
4911 +  __u32 r_usb_ept_data;
4912  
4913 -       urb_entry->urb = urb;
4914 -       list_add_tail(&urb_entry->list, &urb_list[epid]);
4915 +  if (epid < 0 || epid > 31) {
4916 +    printk("Cannot dump ept data for invalid epid %d\n", epid);
4917 +    return;
4918 +  }
4919 +
4920 +  local_irq_save(flags);
4921 +  *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
4922 +  nop();
4923 +  r_usb_ept_data = *R_USB_EPT_DATA;
4924 +  local_irq_restore(flags);
4925 +
4926 +  printk(" R_USB_EPT_DATA = 0x%x for epid %d :\n", r_usb_ept_data, epid);
4927 +  if (r_usb_ept_data == 0) {
4928 +    /* No need for more detailed printing. */
4929 +    return;
4930 +  }
4931 +  printk("  valid           : %d\n", (r_usb_ept_data & 0x80000000) >> 31);
4932 +  printk("  hold            : %d\n", (r_usb_ept_data & 0x40000000) >> 30);
4933 +  printk("  error_count_in  : %d\n", (r_usb_ept_data & 0x30000000) >> 28);
4934 +  printk("  t_in            : %d\n", (r_usb_ept_data & 0x08000000) >> 27);
4935 +  printk("  low_speed       : %d\n", (r_usb_ept_data & 0x04000000) >> 26);
4936 +  printk("  port            : %d\n", (r_usb_ept_data & 0x03000000) >> 24);
4937 +  printk("  error_code      : %d\n", (r_usb_ept_data & 0x00c00000) >> 22);
4938 +  printk("  t_out           : %d\n", (r_usb_ept_data & 0x00200000) >> 21);
4939 +  printk("  error_count_out : %d\n", (r_usb_ept_data & 0x00180000) >> 19);
4940 +  printk("  max_len         : %d\n", (r_usb_ept_data & 0x0003f800) >> 11);
4941 +  printk("  ep              : %d\n", (r_usb_ept_data & 0x00000780) >> 7);
4942 +  printk("  dev             : %d\n", (r_usb_ept_data & 0x0000003f));
4943 +}
4944 +
4945 +static inline void __dump_ept_data_iso(int epid)
4946 +{
4947 +  unsigned long flags;
4948 +  __u32 ept_data;
4949 +
4950 +  if (epid < 0 || epid > 31) {
4951 +    printk("Cannot dump ept data for invalid epid %d\n", epid);
4952 +    return;
4953 +  }
4954 +
4955 +  local_irq_save(flags);
4956 +  *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
4957 +  nop();
4958 +  ept_data = *R_USB_EPT_DATA_ISO;
4959 +  local_irq_restore(flags);
4960 +
4961 +  printk(" R_USB_EPT_DATA = 0x%x for epid %d :\n", ept_data, epid);
4962 +  if (ept_data == 0) {
4963 +    /* No need for more detailed printing. */
4964 +    return;
4965 +  }
4966 +  printk("  valid           : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, valid,
4967 +                                               ept_data));
4968 +  printk("  port            : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, port,
4969 +                                               ept_data));
4970 +  printk("  error_code      : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, error_code,
4971 +                                               ept_data));
4972 +  printk("  max_len         : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, max_len,
4973 +                                               ept_data));
4974 +  printk("  ep              : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, ep,
4975 +                                               ept_data));
4976 +  printk("  dev             : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, dev,
4977 +                                               ept_data));
4978  }
4979  
4980 -/* Search through the list for an element that contains this urb. (The list
4981 -   is expected to be short and the one we are about to delete will often be
4982 -   the first in the list.) */
4983 -static inline urb_entry_t *__urb_list_entry(struct urb *urb, int epid)
4984 +static inline void __dump_ept_data_list(void)
4985  {
4986 -       struct list_head *entry;
4987 -       struct list_head *tmp;
4988 -       urb_entry_t *urb_entry;
4989 -
4990 -       list_for_each_safe(entry, tmp, &urb_list[epid]) {
4991 -               urb_entry = list_entry(entry, urb_entry_t, list);
4992 -               assert(urb_entry);
4993 -               assert(urb_entry->urb);
4994 -
4995 -               if (urb_entry->urb == urb) {
4996 -                       return urb_entry;
4997 -               }
4998 -       }
4999 -       return 0;
5000 -}
5001 +  int i;
5002  
5003 -/* Delete an urb from the list. */
5004 -static inline void urb_list_del(struct urb *urb, int epid)
5005 -{
5006 -       urb_entry_t *urb_entry = __urb_list_entry(urb, epid);
5007 -       assert(urb_entry);
5008 +  printk("Dumping the whole R_USB_EPT_DATA list\n");
5009  
5010 -       /* Delete entry and free. */
5011 -       list_del(&urb_entry->list);
5012 -       kfree(urb_entry);
5013 +  for (i = 0; i < 32; i++) {
5014 +    __dump_ept_data(i);
5015 +  }
5016 +}
5017 +
5018 +static void debug_epid(int epid) {
5019 +  int i;
5020 +  
5021 +  if(epid_isoc(epid)) {
5022 +    __dump_ept_data_iso(epid);
5023 +  } else {
5024 +    __dump_ept_data(epid);
5025 +  }
5026 +
5027 +  printk("Bulk:\n");
5028 +  for(i = 0; i < 32; i++) {
5029 +    if(IO_EXTRACT(USB_EP_command, epid, TxBulkEPList[i].command) ==
5030 +       epid) {
5031 +      printk("%d: ", i); __dump_ep_desc(&(TxBulkEPList[i]));
5032 +    }
5033 +  }
5034 +
5035 +  printk("Ctrl:\n");
5036 +  for(i = 0; i < 32; i++) {
5037 +    if(IO_EXTRACT(USB_EP_command, epid, TxCtrlEPList[i].command) ==
5038 +       epid) {
5039 +      printk("%d: ", i); __dump_ep_desc(&(TxCtrlEPList[i]));
5040 +    }
5041 +  }
5042 +
5043 +  printk("Intr:\n");
5044 +  for(i = 0; i < MAX_INTR_INTERVAL; i++) {
5045 +    if(IO_EXTRACT(USB_EP_command, epid, TxIntrEPList[i].command) ==
5046 +       epid) {
5047 +      printk("%d: ", i); __dump_ep_desc(&(TxIntrEPList[i]));
5048 +    }
5049 +  }
5050 +  
5051 +  printk("Isoc:\n");
5052 +  for(i = 0; i < 32; i++) {
5053 +    if(IO_EXTRACT(USB_EP_command, epid, TxIsocEPList[i].command) ==
5054 +       epid) {
5055 +      printk("%d: ", i); __dump_ep_desc(&(TxIsocEPList[i]));
5056 +    }
5057 +  }
5058 +
5059 +  __dump_ept_data_list();
5060 +  __dump_ep_list(PIPE_INTERRUPT);
5061 +  printk("\n\n");
5062 +}
5063 +
5064 +
5065 +
5066 +char* hcd_status_to_str(__u8 bUsbStatus) {
5067 +  static char hcd_status_str[128];
5068 +  hcd_status_str[0] = '\0';
5069 +  if(bUsbStatus & IO_STATE(R_USB_STATUS, ourun, yes)) {
5070 +    strcat(hcd_status_str, "ourun ");
5071 +  }
5072 +  if(bUsbStatus & IO_STATE(R_USB_STATUS, perror, yes)) {
5073 +    strcat(hcd_status_str, "perror ");
5074 +  }
5075 +  if(bUsbStatus & IO_STATE(R_USB_STATUS, device_mode, yes)) {
5076 +    strcat(hcd_status_str, "device_mode ");
5077 +  }
5078 +  if(bUsbStatus & IO_STATE(R_USB_STATUS, host_mode, yes)) {
5079 +    strcat(hcd_status_str, "host_mode ");
5080 +  }
5081 +  if(bUsbStatus & IO_STATE(R_USB_STATUS, started, yes)) {
5082 +    strcat(hcd_status_str, "started ");
5083 +  }
5084 +  if(bUsbStatus & IO_STATE(R_USB_STATUS, running, yes)) {
5085 +    strcat(hcd_status_str, "running ");
5086 +  }
5087 +  return hcd_status_str;
5088 +}
5089 +
5090 +
5091 +char* sblist_to_str(struct USB_SB_Desc* sb_desc) {
5092 +  static char sblist_to_str_buff[128];
5093 +  char tmp[32], tmp2[32];
5094 +  sblist_to_str_buff[0] = '\0';
5095 +  while(sb_desc != NULL) {
5096 +    switch(IO_EXTRACT(USB_SB_command, tt, sb_desc->command)) {
5097 +    case 0: sprintf(tmp, "zout");  break;
5098 +    case 1: sprintf(tmp, "in");    break;
5099 +    case 2: sprintf(tmp, "out");   break;
5100 +    case 3: sprintf(tmp, "setup"); break;
5101 +    }
5102 +    sprintf(tmp2, "(%s %d)", tmp, sb_desc->sw_len);
5103 +    strcat(sblist_to_str_buff, tmp2);
5104 +    if(sb_desc->next != 0) {
5105 +      sb_desc = phys_to_virt(sb_desc->next);
5106 +    } else {
5107 +      sb_desc = NULL;
5108 +    }
5109 +  }
5110 +  return sblist_to_str_buff;
5111 +}
5112 +
5113 +char* port_status_to_str(__u16 wPortStatus) {
5114 +  static char port_status_str[128];
5115 +  port_status_str[0] = '\0';
5116 +  if(wPortStatus & IO_STATE(R_USB_RH_PORT_STATUS_1, connected, yes)) {
5117 +    strcat(port_status_str, "connected ");
5118 +  }
5119 +  if(wPortStatus & IO_STATE(R_USB_RH_PORT_STATUS_1, enabled, yes)) {
5120 +    strcat(port_status_str, "enabled ");
5121 +  }
5122 +  if(wPortStatus & IO_STATE(R_USB_RH_PORT_STATUS_1, suspended, yes)) {
5123 +    strcat(port_status_str, "suspended ");
5124 +  }
5125 +  if(wPortStatus & IO_STATE(R_USB_RH_PORT_STATUS_1, reset, yes)) {
5126 +    strcat(port_status_str, "reset ");
5127 +  }
5128 +  if(wPortStatus & IO_STATE(R_USB_RH_PORT_STATUS_1, speed, full)) {
5129 +    strcat(port_status_str, "full-speed ");
5130 +  } else {
5131 +    strcat(port_status_str, "low-speed ");
5132 +  }
5133 +  return port_status_str;
5134 +}
5135 +
5136 +
5137 +char* endpoint_to_str(struct usb_endpoint_descriptor *ed) {
5138 +  static char endpoint_to_str_buff[128];
5139 +  char tmp[32];
5140 +  int epnum = ed->bEndpointAddress & 0x0F;
5141 +  int dir = ed->bEndpointAddress & 0x80;
5142 +  int type = ed->bmAttributes & 0x03;
5143 +  endpoint_to_str_buff[0] = '\0';
5144 +  sprintf(endpoint_to_str_buff, "ep:%d ", epnum);
5145 +  switch(type) {
5146 +  case 0:
5147 +    sprintf(tmp, " ctrl");
5148 +    break;
5149 +  case 1:
5150 +    sprintf(tmp, " isoc");
5151 +    break;
5152 +  case 2:
5153 +    sprintf(tmp, " bulk");
5154 +    break;
5155 +  case 3:
5156 +    sprintf(tmp, " intr");
5157 +    break;
5158 +  }
5159 +  strcat(endpoint_to_str_buff, tmp);
5160 +  if(dir) {
5161 +    sprintf(tmp, " in");
5162 +  } else {
5163 +    sprintf(tmp, " out");
5164 +  }
5165 +  strcat(endpoint_to_str_buff, tmp);
5166 +
5167 +  return endpoint_to_str_buff;
5168 +}
5169 +
5170 +/* Debug helper functions for Transfer Controller */
5171 +char* pipe_to_str(unsigned int pipe) {
5172 +  static char pipe_to_str_buff[128];
5173 +  char tmp[64];
5174 +  sprintf(pipe_to_str_buff, "dir:%s", str_dir(pipe));
5175 +  sprintf(tmp, " type:%s", str_type(pipe));
5176 +  strcat(pipe_to_str_buff, tmp);
5177 +
5178 +  sprintf(tmp, " dev:%d", usb_pipedevice(pipe));
5179 +  strcat(pipe_to_str_buff, tmp);
5180 +  sprintf(tmp, " ep:%d", usb_pipeendpoint(pipe));
5181 +  strcat(pipe_to_str_buff, tmp);
5182 +  return pipe_to_str_buff;
5183  }
5184  
5185 -/* Move an urb to the end of the list. */
5186 -static inline void urb_list_move_last(struct urb *urb, int epid)
5187 -{
5188 -       urb_entry_t *urb_entry = __urb_list_entry(urb, epid);
5189 -       assert(urb_entry);
5190 -
5191 -       list_move_tail(&urb_entry->list, &urb_list[epid]);
5192 -}
5193  
5194 -/* Get the next urb in the list. */
5195 -static inline struct urb *urb_list_next(struct urb *urb, int epid)
5196 -{
5197 -       urb_entry_t *urb_entry = __urb_list_entry(urb, epid);
5198 +#define USB_DEBUG_DESC 1
5199  
5200 -       assert(urb_entry);
5201 +#ifdef USB_DEBUG_DESC
5202 +#define dump_in_desc(x) __dump_in_desc(x)
5203 +#define dump_sb_desc(...) __dump_sb_desc(__VA_ARGS__)
5204 +#define dump_ep_desc(x) __dump_ep_desc(x)
5205 +#define dump_ept_data(x) __dump_ept_data(x)
5206 +#else
5207 +#define dump_in_desc(...) do {} while (0)
5208 +#define dump_sb_desc(...) do {} while (0)
5209 +#define dump_ep_desc(...) do {} while (0)
5210 +#endif
5211  
5212 -       if (urb_entry->list.next != &urb_list[epid]) {
5213 -               struct list_head *elem = urb_entry->list.next;
5214 -               urb_entry = list_entry(elem, urb_entry_t, list);
5215 -               return urb_entry->urb;
5216 -       } else {
5217 -               return NULL;
5218 -       }
5219 -}
5220  
5221 +/* Uncomment this to enable massive function call trace
5222 +   #define USB_DEBUG_TRACE */
5223  
5224 +#ifdef USB_DEBUG_TRACE
5225 +#define DBFENTER (printk(": Entering: %s\n", __FUNCTION__))
5226 +#define DBFEXIT  (printk(": Exiting:  %s\n", __FUNCTION__))
5227 +#else
5228 +#define DBFENTER do {} while (0)
5229 +#define DBFEXIT  do {} while (0)
5230 +#endif
5231  
5232 -/* For debug purposes only. */
5233 -static inline void urb_list_dump(int epid)
5234 -{
5235 -       struct list_head *entry;
5236 -       struct list_head *tmp;
5237 -       urb_entry_t *urb_entry;
5238 -       int i = 0;
5239 -
5240 -       info("Dumping urb list for epid %d", epid);
5241 -
5242 -       list_for_each_safe(entry, tmp, &urb_list[epid]) {
5243 -               urb_entry = list_entry(entry, urb_entry_t, list);
5244 -               info("   entry %d, urb = 0x%lx", i, (unsigned long)urb_entry->urb);
5245 -       }
5246 -}
5247 +#define CHECK_ALIGN(x) if (((__u32)(x)) & 0x00000003) \
5248 +{panic("Alignment check (DWORD) failed at %s:%s:%d\n", __FILE__, __FUNCTION__, __LINE__);}
5249  
5250 -static void init_rx_buffers(void);
5251 -static int etrax_rh_unlink_urb(struct urb *urb);
5252 -static void etrax_rh_send_irq(struct urb *urb);
5253 -static void etrax_rh_init_int_timer(struct urb *urb);
5254 -static void etrax_rh_int_timer_do(unsigned long ptr);
5255 -
5256 -static int etrax_usb_setup_epid(struct urb *urb);
5257 -static int etrax_usb_lookup_epid(struct urb *urb);
5258 -static int etrax_usb_allocate_epid(void);
5259 -static void etrax_usb_free_epid(int epid);
5260 -
5261 -static int etrax_remove_from_sb_list(struct urb *urb);
5262 -
5263 -static void* etrax_usb_buffer_alloc(struct usb_bus* bus, size_t size,
5264 -       unsigned mem_flags, dma_addr_t *dma);
5265 -static void etrax_usb_buffer_free(struct usb_bus *bus, size_t size, void *addr, dma_addr_t dma);
5266 -
5267 -static void etrax_usb_add_to_bulk_sb_list(struct urb *urb, int epid);
5268 -static void etrax_usb_add_to_ctrl_sb_list(struct urb *urb, int epid);
5269 -static void etrax_usb_add_to_intr_sb_list(struct urb *urb, int epid);
5270 -static void etrax_usb_add_to_isoc_sb_list(struct urb *urb, int epid);
5271 -
5272 -static int etrax_usb_submit_bulk_urb(struct urb *urb);
5273 -static int etrax_usb_submit_ctrl_urb(struct urb *urb);
5274 -static int etrax_usb_submit_intr_urb(struct urb *urb);
5275 -static int etrax_usb_submit_isoc_urb(struct urb *urb);
5276 -
5277 -static int etrax_usb_submit_urb(struct urb *urb, unsigned mem_flags);
5278 -static int etrax_usb_unlink_urb(struct urb *urb, int status);
5279 -static int etrax_usb_get_frame_number(struct usb_device *usb_dev);
5280 -
5281 -static irqreturn_t etrax_usb_tx_interrupt(int irq, void *vhc);
5282 -static irqreturn_t etrax_usb_rx_interrupt(int irq, void *vhc);
5283 -static irqreturn_t etrax_usb_hc_interrupt_top_half(int irq, void *vhc);
5284 -static void etrax_usb_hc_interrupt_bottom_half(void *data);
5285 -
5286 -static void etrax_usb_isoc_descr_interrupt_bottom_half(void *data);
5287 -
5288 -
5289 -/* The following is a list of interrupt handlers for the host controller interrupts we use.
5290 -   They are called from etrax_usb_hc_interrupt_bottom_half. */
5291 -static void etrax_usb_hc_isoc_eof_interrupt(void);
5292 -static void etrax_usb_hc_bulk_eot_interrupt(int timer_induced);
5293 -static void etrax_usb_hc_epid_attn_interrupt(usb_interrupt_registers_t *reg);
5294 -static void etrax_usb_hc_port_status_interrupt(usb_interrupt_registers_t *reg);
5295 -static void etrax_usb_hc_ctl_status_interrupt(usb_interrupt_registers_t *reg);
5296 -
5297 -static int etrax_rh_submit_urb (struct urb *urb);
5298 -
5299 -/* Forward declaration needed because they are used in the rx interrupt routine. */
5300 -static void etrax_usb_complete_urb(struct urb *urb, int status);
5301 -static void etrax_usb_complete_bulk_urb(struct urb *urb, int status);
5302 -static void etrax_usb_complete_ctrl_urb(struct urb *urb, int status);
5303 -static void etrax_usb_complete_intr_urb(struct urb *urb, int status);
5304 -static void etrax_usb_complete_isoc_urb(struct urb *urb, int status);
5305 +/* Most helpful debugging aid */
5306 +#define ASSERT(expr) ((void) ((expr) ? 0 : (err("assert failed at: %s %d",__FUNCTION__, __LINE__))))
5307  
5308 -static int etrax_usb_hc_init(void);
5309 -static void etrax_usb_hc_cleanup(void);
5310  
5311 -static struct usb_operations etrax_usb_device_operations =
5312 -{
5313 -       .get_frame_number = etrax_usb_get_frame_number,
5314 -       .submit_urb = etrax_usb_submit_urb,
5315 -       .unlink_urb = etrax_usb_unlink_urb,
5316 -        .buffer_alloc = etrax_usb_buffer_alloc,
5317 -        .buffer_free = etrax_usb_buffer_free
5318 -};
5319 +/***************************************************************************/
5320 +/***************************************************************************/
5321 +/* Forward declarations                                                    */
5322 +/***************************************************************************/
5323 +/***************************************************************************/
5324 +void crisv10_hcd_epid_attn_irq(struct crisv10_irq_reg *reg);
5325 +void crisv10_hcd_port_status_irq(struct crisv10_irq_reg *reg);
5326 +void crisv10_hcd_ctl_status_irq(struct crisv10_irq_reg *reg);
5327 +void crisv10_hcd_isoc_eof_irq(struct crisv10_irq_reg *reg);
5328 +
5329 +void rh_port_status_change(__u16[]);
5330 +int  rh_clear_port_feature(__u8, __u16);
5331 +int  rh_set_port_feature(__u8, __u16);
5332 +static void rh_disable_port(unsigned int port);
5333 +
5334 +static void check_finished_bulk_tx_epids(struct usb_hcd *hcd,
5335 +                                        int timer);
5336 +
5337 +static int  tc_setup_epid(struct usb_host_endpoint *ep, struct urb *urb,
5338 +                        int mem_flags);
5339 +static void tc_free_epid(struct usb_host_endpoint *ep);
5340 +static int  tc_allocate_epid(void);
5341 +static void tc_finish_urb(struct usb_hcd *hcd, struct urb *urb, int status);
5342 +static void tc_finish_urb_later(struct usb_hcd *hcd, struct urb *urb,
5343 +                               int status);
5344 +
5345 +static int  urb_priv_create(struct usb_hcd *hcd, struct urb *urb, int epid,
5346 +                          int mem_flags);
5347 +static void urb_priv_free(struct usb_hcd *hcd, struct urb *urb);
5348 +
5349 +static inline struct urb *urb_list_first(int epid);
5350 +static inline void        urb_list_add(struct urb *urb, int epid,
5351 +                                     int mem_flags);
5352 +static inline urb_entry_t *urb_list_entry(struct urb *urb, int epid);
5353 +static inline void        urb_list_del(struct urb *urb, int epid);
5354 +static inline void        urb_list_move_last(struct urb *urb, int epid);
5355 +static inline struct urb *urb_list_next(struct urb *urb, int epid);
5356 +
5357 +int create_sb_for_urb(struct urb *urb, int mem_flags);
5358 +int init_intr_urb(struct urb *urb, int mem_flags);
5359 +
5360 +static inline void  etrax_epid_set(__u8 index, __u32 data);
5361 +static inline void  etrax_epid_clear_error(__u8 index);
5362 +static inline void  etrax_epid_set_toggle(__u8 index, __u8 dirout,
5363 +                                             __u8 toggle);
5364 +static inline __u8  etrax_epid_get_toggle(__u8 index, __u8 dirout);
5365 +static inline __u32 etrax_epid_get(__u8 index);
5366 +
5367 +/* We're accessing the same register position in Etrax so
5368 +   when we do full access the internal difference doesn't matter */
5369 +#define etrax_epid_iso_set(index, data) etrax_epid_set(index, data)
5370 +#define etrax_epid_iso_get(index) etrax_epid_get(index)
5371 +
5372 +
5373 +static void        tc_dma_process_isoc_urb(struct urb *urb);
5374 +static void        tc_dma_process_queue(int epid);
5375 +static void        tc_dma_unlink_intr_urb(struct urb *urb);
5376 +static irqreturn_t tc_dma_tx_interrupt(int irq, void *vhc);
5377 +static irqreturn_t tc_dma_rx_interrupt(int irq, void *vhc);
5378 +
5379 +static void tc_bulk_start_timer_func(unsigned long dummy);
5380 +static void tc_bulk_eot_timer_func(unsigned long dummy);
5381 +
5382 +
5383 +/*************************************************************/
5384 +/*************************************************************/
5385 +/* Host Controller Driver block                              */
5386 +/*************************************************************/
5387 +/*************************************************************/
5388 +
5389 +/* HCD operations */
5390 +static irqreturn_t crisv10_hcd_top_irq(int irq, void*);
5391 +static int crisv10_hcd_reset(struct usb_hcd *);
5392 +static int crisv10_hcd_start(struct usb_hcd *);
5393 +static void crisv10_hcd_stop(struct usb_hcd *);
5394 +#ifdef CONFIG_PM
5395 +static int crisv10_hcd_suspend(struct device *, u32, u32);
5396 +static int crisv10_hcd_resume(struct device *, u32);
5397 +#endif /* CONFIG_PM */
5398 +static int crisv10_hcd_get_frame(struct usb_hcd *);
5399 +
5400 +static int  tc_urb_enqueue(struct usb_hcd *, struct usb_host_endpoint *ep, struct urb *, gfp_t mem_flags);
5401 +static int  tc_urb_dequeue(struct usb_hcd *, struct urb *);
5402 +static void tc_endpoint_disable(struct usb_hcd *, struct usb_host_endpoint *ep);
5403 +
5404 +static int rh_status_data_request(struct usb_hcd *, char *);
5405 +static int rh_control_request(struct usb_hcd *, u16, u16, u16, char*, u16);
5406 +
5407 +#ifdef CONFIG_PM
5408 +static int crisv10_hcd_hub_suspend(struct usb_hcd *);
5409 +static int crisv10_hcd_hub_resume(struct usb_hcd *);
5410 +#endif /* CONFIG_PM */
5411 +#ifdef CONFIG_USB_OTG
5412 +static int crisv10_hcd_start_port_reset(struct usb_hcd *, unsigned);
5413 +#endif /* CONFIG_USB_OTG */
5414 +
5415 +/* host controller driver interface */
5416 +static const struct hc_driver crisv10_hc_driver = 
5417 +  {
5418 +    .description =     hc_name,
5419 +    .product_desc =    product_desc,
5420 +    .hcd_priv_size =   sizeof(struct crisv10_hcd),
5421 +
5422 +    /* Attaching IRQ handler manually in probe() */
5423 +    /* .irq =          crisv10_hcd_irq, */
5424 +
5425 +    .flags =           HCD_USB11,
5426 +
5427 +    /* called to init HCD and root hub */
5428 +    .reset =           crisv10_hcd_reset,
5429 +    .start =           crisv10_hcd_start,      
5430 +
5431 +    /* cleanly make HCD stop writing memory and doing I/O */
5432 +    .stop =            crisv10_hcd_stop,
5433 +
5434 +    /* return current frame number */
5435 +    .get_frame_number =        crisv10_hcd_get_frame,
5436 +
5437 +
5438 +    /* Manage i/o requests via the Transfer Controller */
5439 +    .urb_enqueue =     tc_urb_enqueue,
5440 +    .urb_dequeue =     tc_urb_dequeue,
5441 +
5442 +    /* hw synch, freeing endpoint resources that urb_dequeue can't */
5443 +    .endpoint_disable = tc_endpoint_disable,
5444 +
5445 +
5446 +    /* Root Hub support */
5447 +    .hub_status_data = rh_status_data_request,
5448 +    .hub_control =     rh_control_request,
5449 +#ifdef CONFIG_PM
5450 +    .hub_suspend =     rh_suspend_request,
5451 +    .hub_resume =      rh_resume_request,
5452 +#endif /* CONFIG_PM */
5453 +#ifdef CONFIG_USB_OTG
5454 +    .start_port_reset =        crisv10_hcd_start_port_reset,
5455 +#endif /* CONFIG_USB_OTG */
5456 +  };
5457  
5458 -/* Note that these functions are always available in their "__" variants, for use in
5459 -   error situations. The "__" missing variants are controlled by the USB_DEBUG_DESC/
5460 -   USB_DEBUG_URB macros. */
5461 -static void __dump_urb(struct urb* purb)
5462 -{
5463 -       printk("\nurb                  :0x%08lx\n", (unsigned long)purb);
5464 -       printk("dev                   :0x%08lx\n", (unsigned long)purb->dev);
5465 -       printk("pipe                  :0x%08x\n", purb->pipe);
5466 -       printk("status                :%d\n", purb->status);
5467 -       printk("transfer_flags        :0x%08x\n", purb->transfer_flags);
5468 -       printk("transfer_buffer       :0x%08lx\n", (unsigned long)purb->transfer_buffer);
5469 -       printk("transfer_buffer_length:%d\n", purb->transfer_buffer_length);
5470 -       printk("actual_length         :%d\n", purb->actual_length);
5471 -       printk("setup_packet          :0x%08lx\n", (unsigned long)purb->setup_packet);
5472 -       printk("start_frame           :%d\n", purb->start_frame);
5473 -       printk("number_of_packets     :%d\n", purb->number_of_packets);
5474 -       printk("interval              :%d\n", purb->interval);
5475 -       printk("error_count           :%d\n", purb->error_count);
5476 -       printk("context               :0x%08lx\n", (unsigned long)purb->context);
5477 -       printk("complete              :0x%08lx\n\n", (unsigned long)purb->complete);
5478 -}
5479  
5480 -static void __dump_in_desc(volatile USB_IN_Desc_t *in)
5481 -{
5482 -       printk("\nUSB_IN_Desc at 0x%08lx\n", (unsigned long)in);
5483 -       printk("  sw_len  : 0x%04x (%d)\n", in->sw_len, in->sw_len);
5484 -       printk("  command : 0x%04x\n", in->command);
5485 -       printk("  next    : 0x%08lx\n", in->next);
5486 -       printk("  buf     : 0x%08lx\n", in->buf);
5487 -       printk("  hw_len  : 0x%04x (%d)\n", in->hw_len, in->hw_len);
5488 -       printk("  status  : 0x%04x\n\n", in->status);
5489 -}
5490 +/*
5491 + * conversion between pointers to a hcd and the corresponding
5492 + * crisv10_hcd 
5493 + */
5494  
5495 -static void __dump_sb_desc(volatile USB_SB_Desc_t *sb)
5496 +static inline struct crisv10_hcd *hcd_to_crisv10_hcd(struct usb_hcd *hcd)
5497  {
5498 -       char tt = (sb->command & 0x30) >> 4;
5499 -       char *tt_string;
5500 -
5501 -       switch (tt) {
5502 -       case 0:
5503 -               tt_string = "zout";
5504 -               break;
5505 -       case 1:
5506 -               tt_string = "in";
5507 -               break;
5508 -       case 2:
5509 -               tt_string = "out";
5510 -               break;
5511 -       case 3:
5512 -               tt_string = "setup";
5513 -               break;
5514 -       default:
5515 -               tt_string = "unknown (weird)";
5516 -       }
5517 -
5518 -       printk("\n   USB_SB_Desc at 0x%08lx\n", (unsigned long)sb);
5519 -       printk("     command : 0x%04x\n", sb->command);
5520 -       printk("        rem     : %d\n", (sb->command & 0x3f00) >> 8);
5521 -       printk("        full    : %d\n", (sb->command & 0x40) >> 6);
5522 -       printk("        tt      : %d (%s)\n", tt, tt_string);
5523 -       printk("        intr    : %d\n", (sb->command & 0x8) >> 3);
5524 -       printk("        eot     : %d\n", (sb->command & 0x2) >> 1);
5525 -       printk("        eol     : %d\n", sb->command & 0x1);
5526 -       printk("     sw_len  : 0x%04x (%d)\n", sb->sw_len, sb->sw_len);
5527 -       printk("     next    : 0x%08lx\n", sb->next);
5528 -       printk("     buf     : 0x%08lx\n\n", sb->buf);
5529 +       return (struct crisv10_hcd *) hcd->hcd_priv;
5530  }
5531  
5532 -
5533 -static void __dump_ep_desc(volatile USB_EP_Desc_t *ep)
5534 +static inline struct usb_hcd *crisv10_hcd_to_hcd(struct crisv10_hcd *hcd)
5535  {
5536 -       printk("\nUSB_EP_Desc at 0x%08lx\n", (unsigned long)ep);
5537 -       printk("  command : 0x%04x\n", ep->command);
5538 -       printk("     ep_id   : %d\n", (ep->command & 0x1f00) >> 8);
5539 -       printk("     enable  : %d\n", (ep->command & 0x10) >> 4);
5540 -       printk("     intr    : %d\n", (ep->command & 0x8) >> 3);
5541 -       printk("     eof     : %d\n", (ep->command & 0x2) >> 1);
5542 -       printk("     eol     : %d\n", ep->command & 0x1);
5543 -       printk("  hw_len  : 0x%04x (%d)\n", ep->hw_len, ep->hw_len);
5544 -       printk("  next    : 0x%08lx\n", ep->next);
5545 -       printk("  sub     : 0x%08lx\n\n", ep->sub);
5546 +       return container_of((void *) hcd, struct usb_hcd, hcd_priv);
5547  }
5548  
5549 -static inline void __dump_ep_list(int pipe_type)
5550 +/* check if specified port is in use */
5551 +static inline int port_in_use(unsigned int port)
5552  {
5553 -       volatile USB_EP_Desc_t *ep;
5554 -       volatile USB_EP_Desc_t *first_ep;
5555 -       volatile USB_SB_Desc_t *sb;
5556 -
5557 -       switch (pipe_type)
5558 -       {
5559 -       case PIPE_BULK:
5560 -               first_ep = &TxBulkEPList[0];
5561 -               break;
5562 -       case PIPE_CONTROL:
5563 -               first_ep = &TxCtrlEPList[0];
5564 -               break;
5565 -       case PIPE_INTERRUPT:
5566 -               first_ep = &TxIntrEPList[0];
5567 -               break;
5568 -       case PIPE_ISOCHRONOUS:
5569 -               first_ep = &TxIsocEPList[0];
5570 -               break;
5571 -       default:
5572 -               warn("Cannot dump unknown traffic type");
5573 -               return;
5574 -       }
5575 -       ep = first_ep;
5576 -
5577 -       printk("\n\nDumping EP list...\n\n");
5578 -
5579 -       do {
5580 -               __dump_ep_desc(ep);
5581 -               /* Cannot phys_to_virt on 0 as it turns into 80000000, which is != 0. */
5582 -               sb = ep->sub ? phys_to_virt(ep->sub) : 0;
5583 -               while (sb) {
5584 -                       __dump_sb_desc(sb);
5585 -                       sb = sb->next ? phys_to_virt(sb->next) : 0;
5586 -               }
5587 -               ep = (volatile USB_EP_Desc_t *)(phys_to_virt(ep->next));
5588 -
5589 -       } while (ep != first_ep);
5590 +       return ports & (1 << port);
5591  }
5592  
5593 -static inline void __dump_ept_data(int epid)
5594 +/* number of ports in use */
5595 +static inline unsigned int num_ports(void)
5596  {
5597 -       unsigned long flags;
5598 -       __u32 r_usb_ept_data;
5599 -
5600 -       if (epid < 0 || epid > 31) {
5601 -               printk("Cannot dump ept data for invalid epid %d\n", epid);
5602 -               return;
5603 -       }
5604 -
5605 -       save_flags(flags);
5606 -       cli();
5607 -       *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
5608 -       nop();
5609 -       r_usb_ept_data = *R_USB_EPT_DATA;
5610 -       restore_flags(flags);
5611 -
5612 -       printk("\nR_USB_EPT_DATA = 0x%x for epid %d :\n", r_usb_ept_data, epid);
5613 -       if (r_usb_ept_data == 0) {
5614 -               /* No need for more detailed printing. */
5615 -               return;
5616 -       }
5617 -       printk("  valid           : %d\n", (r_usb_ept_data & 0x80000000) >> 31);
5618 -       printk("  hold            : %d\n", (r_usb_ept_data & 0x40000000) >> 30);
5619 -       printk("  error_count_in  : %d\n", (r_usb_ept_data & 0x30000000) >> 28);
5620 -       printk("  t_in            : %d\n", (r_usb_ept_data & 0x08000000) >> 27);
5621 -       printk("  low_speed       : %d\n", (r_usb_ept_data & 0x04000000) >> 26);
5622 -       printk("  port            : %d\n", (r_usb_ept_data & 0x03000000) >> 24);
5623 -       printk("  error_code      : %d\n", (r_usb_ept_data & 0x00c00000) >> 22);
5624 -       printk("  t_out           : %d\n", (r_usb_ept_data & 0x00200000) >> 21);
5625 -       printk("  error_count_out : %d\n", (r_usb_ept_data & 0x00180000) >> 19);
5626 -       printk("  max_len         : %d\n", (r_usb_ept_data & 0x0003f800) >> 11);
5627 -       printk("  ep              : %d\n", (r_usb_ept_data & 0x00000780) >> 7);
5628 -       printk("  dev             : %d\n", (r_usb_ept_data & 0x0000003f));
5629 +       unsigned int i, num = 0;
5630 +       for (i = 0; i < USB_ROOT_HUB_PORTS; i++)
5631 +               if (port_in_use(i))
5632 +                       num++;
5633 +       return num;
5634  }
5635  
5636 -static inline void __dump_ept_data_list(void)
5637 +/* map hub port number to the port number used internally by the HC */
5638 +static inline unsigned int map_port(unsigned int port)
5639  {
5640 -       int i;
5641 -
5642 -       printk("Dumping the whole R_USB_EPT_DATA list\n");
5643 -
5644 -       for (i = 0; i < 32; i++) {
5645 -               __dump_ept_data(i);
5646 -       }
5647 +  unsigned int i, num = 0;
5648 +  for (i = 0; i < USB_ROOT_HUB_PORTS; i++)
5649 +    if (port_in_use(i))
5650 +      if (++num == port)
5651 +       return i;
5652 +  return -1;
5653  }
5654 -#ifdef USB_DEBUG_DESC
5655 -#define dump_in_desc(...) __dump_in_desc(...)
5656 -#define dump_sb_desc(...) __dump_sb_desc(...)
5657 -#define dump_ep_desc(...) __dump_ep_desc(...)
5658 -#else
5659 -#define dump_in_desc(...) do {} while (0)
5660 -#define dump_sb_desc(...) do {} while (0)
5661 -#define dump_ep_desc(...) do {} while (0)
5662 -#endif
5663  
5664 -#ifdef USB_DEBUG_URB
5665 -#define dump_urb(x)     __dump_urb(x)
5666 -#else
5667 -#define dump_urb(x)     do {} while (0)
5668 +/* size of descriptors in slab cache */
5669 +#ifndef MAX
5670 +#define MAX(x, y)              ((x) > (y) ? (x) : (y))
5671  #endif
5672  
5673 -static void init_rx_buffers(void)
5674 -{
5675 -       int i;
5676  
5677 -       DBFENTER;
5678 +/******************************************************************/
5679 +/* Hardware Interrupt functions                                   */
5680 +/******************************************************************/
5681 +
5682 +/* Fast interrupt handler for HC */
5683 +static irqreturn_t crisv10_hcd_top_irq(int irq, void *vcd)
5684 +{
5685 +  struct usb_hcd *hcd = vcd;
5686 +  struct crisv10_irq_reg reg;
5687 +  __u32 irq_mask;
5688 +  unsigned long flags;
5689 +
5690 +  DBFENTER;
5691 +
5692 +  ASSERT(hcd != NULL);
5693 +  reg.hcd = hcd;
5694 +
+  /* Turn off other interrupts while handling these sensitive cases */
5696 +  local_irq_save(flags);
5697 +  
+  /* Read out which interrupts are flagged */
5699 +  irq_mask = *R_USB_IRQ_MASK_READ;
5700 +  reg.r_usb_irq_mask_read = irq_mask;
5701 +
5702 +  /* Reading R_USB_STATUS clears the ctl_status interrupt. Note that
5703 +     R_USB_STATUS must be read before R_USB_EPID_ATTN since reading the latter
5704 +     clears the ourun and perror fields of R_USB_STATUS. */
5705 +  reg.r_usb_status = *R_USB_STATUS;
5706 +  
5707 +  /* Reading R_USB_EPID_ATTN clears the iso_eof, bulk_eot and epid_attn
5708 +     interrupts. */
5709 +  reg.r_usb_epid_attn = *R_USB_EPID_ATTN;
5710 +  
5711 +  /* Reading R_USB_RH_PORT_STATUS_1 and R_USB_RH_PORT_STATUS_2 clears the
5712 +     port_status interrupt. */
5713 +  reg.r_usb_rh_port_status_1 = *R_USB_RH_PORT_STATUS_1;
5714 +  reg.r_usb_rh_port_status_2 = *R_USB_RH_PORT_STATUS_2;
5715 +  
5716 +  /* Reading R_USB_FM_NUMBER clears the sof interrupt. */
5717 +  /* Note: the lower 11 bits contain the actual frame number, sent with each
5718 +     sof. */
5719 +  reg.r_usb_fm_number = *R_USB_FM_NUMBER;
5720 +
5721 +  /* Interrupts are handled in order of priority. */
5722 +  if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, port_status)) {
5723 +    crisv10_hcd_port_status_irq(&reg);
5724 +  }
5725 +  if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, epid_attn)) {
5726 +    crisv10_hcd_epid_attn_irq(&reg);
5727 +  }
5728 +  if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, ctl_status)) {
5729 +    crisv10_hcd_ctl_status_irq(&reg);
5730 +  }
5731 +  if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, iso_eof)) {
5732 +    crisv10_hcd_isoc_eof_irq(&reg);
5733 +  }
5734 +  if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, bulk_eot)) {
5735 +    /* Update/restart the bulk start timer since obviously the channel is
5736 +       running. */
5737 +    mod_timer(&bulk_start_timer, jiffies + BULK_START_TIMER_INTERVAL);
+    /* Update/restart the bulk eot timer since we just received a bulk eot
5739 +       interrupt. */
5740 +    mod_timer(&bulk_eot_timer, jiffies + BULK_EOT_TIMER_INTERVAL);
5741 +
5742 +    /* Check for finished bulk transfers on epids */
5743 +    check_finished_bulk_tx_epids(hcd, 0);
5744 +  }
5745 +  local_irq_restore(flags);
5746 +
5747 +  DBFEXIT;
5748 +  return IRQ_HANDLED;
5749 +}
5750 +
5751 +
5752 +void crisv10_hcd_epid_attn_irq(struct crisv10_irq_reg *reg) {
5753 +  struct usb_hcd *hcd = reg->hcd;
5754 +  struct crisv10_urb_priv *urb_priv;
5755 +  int epid;
5756 +  DBFENTER;
5757 +
5758 +  for (epid = 0; epid < NBR_OF_EPIDS; epid++) {
5759 +    if (test_bit(epid, (void *)&reg->r_usb_epid_attn)) {
5760 +      struct urb *urb;
5761 +      __u32 ept_data;
5762 +      int error_code;
5763 +
5764 +      if (epid == DUMMY_EPID || epid == INVALID_EPID) {
5765 +       /* We definitely don't care about these ones. Besides, they are
5766 +          always disabled, so any possible disabling caused by the
5767 +          epid attention interrupt is irrelevant. */
5768 +       warn("Got epid_attn for INVALID_EPID or DUMMY_EPID (%d).", epid);
5769 +       continue;
5770 +      }
5771 +
5772 +      if(!epid_inuse(epid)) {
5773 +       irq_err("Epid attention on epid:%d that isn't in use\n", epid);
5774 +       printk("R_USB_STATUS: 0x%x\n", reg->r_usb_status);
5775 +       debug_epid(epid);
5776 +       continue;
5777 +      }
5778 +
5779 +      /* Note that although there are separate R_USB_EPT_DATA and
5780 +        R_USB_EPT_DATA_ISO registers, they are located at the same address and
5781 +        are of the same size. In other words, this read should be ok for isoc
5782 +        also. */
5783 +      ept_data = etrax_epid_get(epid);
5784 +      error_code = IO_EXTRACT(R_USB_EPT_DATA, error_code, ept_data);
5785 +
5786 +      /* Get the active URB for this epid. We blatantly assume
5787 +        that only this URB could have caused the epid attention. */
5788 +      urb = activeUrbList[epid];
5789 +      if (urb == NULL) {
5790 +       irq_err("Attention on epid:%d error:%d with no active URB.\n",
5791 +               epid, error_code);
5792 +       printk("R_USB_STATUS: 0x%x\n", reg->r_usb_status);
5793 +       debug_epid(epid);
5794 +       continue;
5795 +      }
5796 +
5797 +      urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
5798 +      ASSERT(urb_priv);
5799 +
5800 +      /* Using IO_STATE_VALUE on R_USB_EPT_DATA should be ok for isoc also. */
5801 +      if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA, error_code, no_error)) {
5802 +
5803 +       /* Isoc traffic doesn't have error_count_in/error_count_out. */
5804 +       if ((usb_pipetype(urb->pipe) != PIPE_ISOCHRONOUS) &&
5805 +           (IO_EXTRACT(R_USB_EPT_DATA, error_count_in, ept_data) == 3 ||
5806 +            IO_EXTRACT(R_USB_EPT_DATA, error_count_out, ept_data) == 3)) {
+         /* Check if URB already is marked for late-finish; we can get
+            several 3rd errors for Intr traffic when a device is unplugged */
5809 +         if(urb_priv->later_data == NULL) {
5810 +           /* 3rd error. */
5811 +           irq_warn("3rd error for epid:%d (%s %s) URB:0x%x[%d]\n", epid,
5812 +                    str_dir(urb->pipe), str_type(urb->pipe),
5813 +                    (unsigned int)urb, urb_priv->urb_num);
5814 +         
5815 +           tc_finish_urb_later(hcd, urb, -EPROTO);
5816 +         }
5817 +
5818 +       } else if (reg->r_usb_status & IO_MASK(R_USB_STATUS, perror)) {
5819 +         irq_warn("Perror for epid:%d\n", epid);
5820 +         printk("FM_NUMBER: %d\n", reg->r_usb_fm_number & 0x7ff);
5821 +         printk("R_USB_STATUS: 0x%x\n", reg->r_usb_status);
5822 +         __dump_urb(urb);
5823 +         debug_epid(epid);
5824 +
5825 +         if (!(ept_data & IO_MASK(R_USB_EPT_DATA, valid))) {
5826 +           /* invalid ep_id */
5827 +           panic("Perror because of invalid epid."
5828 +                 " Deconfigured too early?");
5829 +         } else {
5830 +           /* past eof1, near eof, zout transfer, setup transfer */
5831 +           /* Dump the urb and the relevant EP descriptor. */
5832 +           panic("Something wrong with DMA descriptor contents."
5833 +                 " Too much traffic inserted?");
5834 +         }
5835 +       } else if (reg->r_usb_status & IO_MASK(R_USB_STATUS, ourun)) {
5836 +         /* buffer ourun */
5837 +         printk("FM_NUMBER: %d\n", reg->r_usb_fm_number & 0x7ff);
5838 +         printk("R_USB_STATUS: 0x%x\n", reg->r_usb_status);
5839 +         __dump_urb(urb);
5840 +         debug_epid(epid);
5841  
5842 -       for (i = 0; i < (NBR_OF_RX_DESC - 1); i++) {
5843 -               RxDescList[i].sw_len = RX_DESC_BUF_SIZE;
5844 -               RxDescList[i].command = 0;
5845 -               RxDescList[i].next = virt_to_phys(&RxDescList[i + 1]);
5846 -               RxDescList[i].buf = virt_to_phys(RxBuf + (i * RX_DESC_BUF_SIZE));
5847 -               RxDescList[i].hw_len = 0;
5848 -               RxDescList[i].status = 0;
5849 -
5850 -               /* DMA IN cache bug. (struct etrax_dma_descr has the same layout as USB_IN_Desc
5851 -                  for the relevant fields.) */
5852 -               prepare_rx_descriptor((struct etrax_dma_descr*)&RxDescList[i]);
5853 +         panic("Buffer overrun/underrun for epid:%d. DMA too busy?", epid);
5854 +       } else {
5855 +         irq_warn("Attention on epid:%d (%s %s) with no error code\n", epid,
5856 +                  str_dir(urb->pipe), str_type(urb->pipe));
5857 +         printk("R_USB_STATUS: 0x%x\n", reg->r_usb_status);
5858 +         __dump_urb(urb);
5859 +         debug_epid(epid);
5860 +       }
5861  
5862 +      } else if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA, error_code,
5863 +                                             stall)) {
5864 +       /* Not really a protocol error, just says that the endpoint gave
5865 +          a stall response. Note that error_code cannot be stall for isoc. */
5866 +       if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
5867 +         panic("Isoc traffic cannot stall");
5868         }
5869  
5870 -       RxDescList[i].sw_len = RX_DESC_BUF_SIZE;
5871 -       RxDescList[i].command = IO_STATE(USB_IN_command, eol, yes);
5872 -       RxDescList[i].next = virt_to_phys(&RxDescList[0]);
5873 -       RxDescList[i].buf = virt_to_phys(RxBuf + (i * RX_DESC_BUF_SIZE));
5874 -       RxDescList[i].hw_len = 0;
5875 -       RxDescList[i].status = 0;
5876 +       tc_dbg("Stall for epid:%d (%s %s) URB:0x%x\n", epid,
5877 +              str_dir(urb->pipe), str_type(urb->pipe), (unsigned int)urb);
5878 +       tc_finish_urb(hcd, urb, -EPIPE);
5879 +
5880 +      } else if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA, error_code,
5881 +                                             bus_error)) {
5882 +       /* Two devices responded to a transaction request. Must be resolved
5883 +          by software. FIXME: Reset ports? */
5884 +       panic("Bus error for epid %d."
5885 +             " Two devices responded to transaction request\n",
5886 +             epid);
5887 +
5888 +      } else if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA, error_code,
5889 +                                             buffer_error)) {
5890 +       /* DMA overrun or underrun. */
5891 +       irq_warn("Buffer overrun/underrun for epid:%d (%s %s)\n", epid,
5892 +                str_dir(urb->pipe), str_type(urb->pipe));
5893 +
5894 +       /* It seems that error_code = buffer_error in
5895 +          R_USB_EPT_DATA/R_USB_EPT_DATA_ISO and ourun = yes in R_USB_STATUS
5896 +          are the same error. */
5897 +       tc_finish_urb(hcd, urb, -EPROTO);
5898 +      } else {
5899 +         irq_warn("Unknown attention on epid:%d (%s %s)\n", epid,
5900 +                  str_dir(urb->pipe), str_type(urb->pipe));
5901 +         dump_ept_data(epid);
5902 +      }
5903 +    }
5904 +  }
5905 +  DBFEXIT;
5906 +}
5907 +
5908 +void crisv10_hcd_port_status_irq(struct crisv10_irq_reg *reg)
5909 +{
5910 +  __u16 port_reg[USB_ROOT_HUB_PORTS];
5911 +  DBFENTER;
5912 +  port_reg[0] = reg->r_usb_rh_port_status_1;
5913 +  port_reg[1] = reg->r_usb_rh_port_status_2;
5914 +  rh_port_status_change(port_reg);
5915 +  DBFEXIT;
5916 +}
5917 +
5918 +void crisv10_hcd_isoc_eof_irq(struct crisv10_irq_reg *reg)
5919 +{
5920 +  int epid;
5921 +  struct urb *urb;
5922 +  struct crisv10_urb_priv *urb_priv;
5923 +
5924 +  DBFENTER;
5925 +
5926 +  for (epid = 0; epid < NBR_OF_EPIDS - 1; epid++) {
5927 +
+    /* Only check epids that are in use, are valid and have an SB list */
5929 +    if (!epid_inuse(epid) || epid == INVALID_EPID ||
5930 +       TxIsocEPList[epid].sub == 0 || epid == DUMMY_EPID) {
5931 +      /* Nothing here to see. */
5932 +      continue;
5933 +    }
5934 +    ASSERT(epid_isoc(epid));
5935 +
5936 +    /* Get the active URB for this epid (if any). */
5937 +    urb = activeUrbList[epid];
5938 +    if (urb == 0) {
5939 +      isoc_warn("Ignoring NULL urb for epid:%d\n", epid);
5940 +      continue;
5941 +    }
5942 +    if(!epid_out_traffic(epid)) {
5943 +      /* Sanity check. */
5944 +      ASSERT(usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS);
5945 +
5946 +      urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
5947 +      ASSERT(urb_priv);
5948 +
5949 +      if (urb_priv->urb_state == NOT_STARTED) {
5950 +       /* If ASAP is not set and urb->start_frame is the current frame,
5951 +          start the transfer. */
5952 +       if (!(urb->transfer_flags & URB_ISO_ASAP) &&
5953 +           (urb->start_frame == (*R_USB_FM_NUMBER & 0x7ff))) {
5954 +         /* EP should not be enabled if we're waiting for start_frame */
5955 +         ASSERT((TxIsocEPList[epid].command &
5956 +                 IO_STATE(USB_EP_command, enable, yes)) == 0);
5957 +
5958 +         isoc_warn("Enabling isoc IN EP descr for epid %d\n", epid);
5959 +         TxIsocEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes);
5960 +
5961 +         /* This urb is now active. */
5962 +         urb_priv->urb_state = STARTED;
5963 +         continue;
5964 +       }
5965 +      }
5966 +    }
5967 +  }
5968 +
5969 +  DBFEXIT;
5970 +}
5971 +
5972 +void crisv10_hcd_ctl_status_irq(struct crisv10_irq_reg *reg)
5973 +{
5974 +  struct crisv10_hcd* crisv10_hcd = hcd_to_crisv10_hcd(reg->hcd);
5975 +
5976 +  DBFENTER;
5977 +  ASSERT(crisv10_hcd);
5978 +
5979 +  irq_dbg("ctr_status_irq, controller status: %s\n",
5980 +         hcd_status_to_str(reg->r_usb_status));
5981 +  
5982 +  /* FIXME: What should we do if we get ourun or perror? Dump the EP and SB
5983 +     list for the corresponding epid? */
5984 +  if (reg->r_usb_status & IO_MASK(R_USB_STATUS, ourun)) {
5985 +    panic("USB controller got ourun.");
5986 +  }
5987 +  if (reg->r_usb_status & IO_MASK(R_USB_STATUS, perror)) {
5988 +    
5989 +    /* Before, etrax_usb_do_intr_recover was called on this epid if it was
5990 +       an interrupt pipe. I don't see how re-enabling all EP descriptors
5991 +       will help if there was a programming error. */
5992 +    panic("USB controller got perror.");
5993 +  }
5994 +
5995 +  /* Keep track of USB Controller, if it's running or not */
5996 +  if(reg->r_usb_status & IO_STATE(R_USB_STATUS, running, yes)) {
5997 +    crisv10_hcd->running = 1;
5998 +  } else {
5999 +    crisv10_hcd->running = 0;
6000 +  }
6001 +  
6002 +  if (reg->r_usb_status & IO_MASK(R_USB_STATUS, device_mode)) {
6003 +    /* We should never operate in device mode. */
6004 +    panic("USB controller in device mode.");
6005 +  }
6006 +
6007 +  /* Set the flag to avoid getting "Unlink after no-IRQ? Controller is probably
6008 +     using the wrong IRQ" from hcd_unlink_urb() in drivers/usb/core/hcd.c */
6009 +  set_bit(HCD_FLAG_SAW_IRQ, &reg->hcd->flags);
6010 +  
6011 +  DBFEXIT;
6012 +}
6013 +
6014 +
6015 +/******************************************************************/
6016 +/* Host Controller interface functions                            */
6017 +/******************************************************************/
6018 +
6019 +static inline void crisv10_ready_wait(void) {
6020 +  volatile int timeout = 10000;
6021 +  /* Check the busy bit of USB controller in Etrax */
6022 +  while((*R_USB_COMMAND & IO_MASK(R_USB_COMMAND, busy)) &&
6023 +       (timeout-- > 0));
6024 +  if(timeout == 0) {
6025 +    warn("Timeout while waiting for USB controller to be idle\n");
6026 +  }
6027 +}
6028 +
6029 +/* reset host controller */
6030 +static int crisv10_hcd_reset(struct usb_hcd *hcd)
6031 +{
6032 +  DBFENTER;
6033 +  hcd_dbg(hcd, "reset\n");
6034 +
6035 +
6036 +  /* Reset the USB interface. */
6037 +  /*
6038 +  *R_USB_COMMAND =
6039 +    IO_STATE(R_USB_COMMAND, port_sel, nop) |
6040 +    IO_STATE(R_USB_COMMAND, port_cmd, reset) |
6041 +    IO_STATE(R_USB_COMMAND, ctrl_cmd, reset);
6042 +  nop();
6043 +  */
6044 +  DBFEXIT;
6045 +  return 0;
6046 +}
6047 +
6048 +/* start host controller */
6049 +static int crisv10_hcd_start(struct usb_hcd *hcd)
6050 +{
6051 +  DBFENTER;
6052 +  hcd_dbg(hcd, "start\n");
6053 +
6054 +  crisv10_ready_wait();
6055 +
6056 +  /* Start processing of USB traffic. */
6057 +  *R_USB_COMMAND =
6058 +    IO_STATE(R_USB_COMMAND, port_sel, nop) |
6059 +    IO_STATE(R_USB_COMMAND, port_cmd, reset) |
6060 +    IO_STATE(R_USB_COMMAND, ctrl_cmd, host_run);
6061 +
6062 +  nop();
6063 +
6064 +  hcd->state = HC_STATE_RUNNING;
6065 +
6066 +  DBFEXIT;
6067 +  return 0;
6068 +}
6069 +
6070 +/* stop host controller */
6071 +static void crisv10_hcd_stop(struct usb_hcd *hcd)
6072 +{
6073 +  DBFENTER;
6074 +  hcd_dbg(hcd, "stop\n");
6075 +  crisv10_hcd_reset(hcd);
6076 +  DBFEXIT;
6077 +}
6078 +
6079 +/* return the current frame number */
6080 +static int crisv10_hcd_get_frame(struct usb_hcd *hcd)
6081 +{
6082 +  DBFENTER;
6083 +  DBFEXIT;
6084 +  return (*R_USB_FM_NUMBER & 0x7ff);
6085 +}
6086 +
6087 +#ifdef CONFIG_USB_OTG
6088 +
6089 +static int crisv10_hcd_start_port_reset(struct usb_hcd *hcd, unsigned port)
6090 +{
6091 +  return 0; /* no-op for now */
6092 +}
6093 +
6094 +#endif /* CONFIG_USB_OTG */
6095 +
6096 +
6097 +/******************************************************************/
6098 +/* Root Hub functions                                             */
6099 +/******************************************************************/
6100 +
6101 +/* root hub status */
6102 +static const struct usb_hub_status rh_hub_status = 
6103 +  {
6104 +    .wHubStatus =              0,
6105 +    .wHubChange =              0,
6106 +  };
6107 +
6108 +/* root hub descriptor */
6109 +static const u8 rh_hub_descr[] =
6110 +  {
6111 +    0x09,                      /* bDescLength         */
6112 +    0x29,                      /* bDescriptorType     */
6113 +    USB_ROOT_HUB_PORTS,         /* bNbrPorts          */
6114 +    0x00,                      /* wHubCharacteristics */
6115 +    0x00,               
6116 +    0x01,                      /* bPwrOn2pwrGood      */
6117 +    0x00,                      /* bHubContrCurrent    */
6118 +    0x00,                      /* DeviceRemovable     */
6119 +    0xff                       /* PortPwrCtrlMask     */
6120 +  };
6121 +
+/* Actual holder of root hub status */
6123 +struct crisv10_rh rh;
6124 +
6125 +/* Initialize root hub data structures (called from dvdrv_hcd_probe()) */
6126 +int rh_init(void) {
6127 +  int i;
6128 +  /* Reset port status flags */
6129 +  for (i = 0; i < USB_ROOT_HUB_PORTS; i++) {
6130 +    rh.wPortChange[i] = 0;
6131 +    rh.wPortStatusPrev[i] = 0;
6132 +  }
6133 +  return 0;
6134 +}
6135 +
6136 +#define RH_FEAT_MASK ((1<<USB_PORT_FEAT_CONNECTION)|\
6137 +                     (1<<USB_PORT_FEAT_ENABLE)|\
6138 +                     (1<<USB_PORT_FEAT_SUSPEND)|\
6139 +                     (1<<USB_PORT_FEAT_RESET))
6140 +
6141 +/* Handle port status change interrupt (called from bottom part interrupt) */
6142 +void rh_port_status_change(__u16 port_reg[]) {
6143 +  int i;
6144 +  __u16 wChange;
6145 +
6146 +  for(i = 0; i < USB_ROOT_HUB_PORTS; i++) {
6147 +    /* Xor out changes since last read, masked for important flags */
6148 +    wChange = (port_reg[i] & RH_FEAT_MASK) ^ rh.wPortStatusPrev[i];
6149 +    /* Or changes together with (if any) saved changes */
6150 +    rh.wPortChange[i] |= wChange;
6151 +    /* Save new status */
6152 +    rh.wPortStatusPrev[i] = port_reg[i];
6153 +
6154 +    if(wChange) {
6155 +      rh_dbg("Interrupt port_status change port%d: %s  Current-status:%s\n", i+1,
6156 +            port_status_to_str(wChange),
6157 +            port_status_to_str(port_reg[i]));
6158 +    }
6159 +  }
6160 +}
6161 +
6162 +/* Construct port status change bitmap for the root hub */
6163 +static int rh_status_data_request(struct usb_hcd *hcd, char *buf)
6164 +{
6165 +  struct crisv10_hcd* crisv10_hcd = hcd_to_crisv10_hcd(hcd);
6166 +  unsigned int i;
6167 +
6168 +  DBFENTER;
6169 +  /*
6170 +   * corresponds to hub status change EP (USB 2.0 spec section 11.13.4)
6171 +   * return bitmap indicating ports with status change
6172 +   */
6173 +  *buf = 0;
6174 +  spin_lock(&crisv10_hcd->lock);
6175 +  for (i = 1; i <= crisv10_hcd->num_ports; i++) {
6176 +    if (rh.wPortChange[map_port(i)]) {
6177 +      *buf |= (1 << i);
6178 +      rh_dbg("rh_status_data_request, change on port %d: %s  Current Status: %s\n", i,
6179 +            port_status_to_str(rh.wPortChange[map_port(i)]),
6180 +            port_status_to_str(rh.wPortStatusPrev[map_port(i)]));
6181 +    }
6182 +  }
6183 +  spin_unlock(&crisv10_hcd->lock);
6184 +  DBFEXIT;
6185 +  return *buf == 0 ? 0 : 1;
6186 +}
6187 +
6188 +/* Handle a control request for the root hub (called from hcd_driver) */
6189 +static int rh_control_request(struct usb_hcd *hcd, 
6190 +                             u16 typeReq, 
6191 +                             u16 wValue, 
6192 +                             u16 wIndex,
6193 +                             char *buf, 
6194 +                             u16 wLength) {
6195 +
6196 +  struct crisv10_hcd *crisv10_hcd = hcd_to_crisv10_hcd(hcd);
6197 +  int retval = 0;
6198 +  int len;
6199 +  DBFENTER;
6200 +
6201 +  switch (typeReq) {
6202 +  case GetHubDescriptor:
6203 +    rh_dbg("GetHubDescriptor\n");
6204 +    len = min_t(unsigned int, sizeof rh_hub_descr, wLength);
6205 +    memcpy(buf, rh_hub_descr, len);
6206 +    buf[2] = crisv10_hcd->num_ports;
6207 +    break;
6208 +  case GetHubStatus:
6209 +    rh_dbg("GetHubStatus\n");
6210 +    len = min_t(unsigned int, sizeof rh_hub_status, wLength);
6211 +    memcpy(buf, &rh_hub_status, len);
6212 +    break;
6213 +  case GetPortStatus:
6214 +    if (!wIndex || wIndex > crisv10_hcd->num_ports)
6215 +      goto error;
6216 +    rh_dbg("GetportStatus, port:%d change:%s  status:%s\n", wIndex,
6217 +          port_status_to_str(rh.wPortChange[map_port(wIndex)]),
6218 +          port_status_to_str(rh.wPortStatusPrev[map_port(wIndex)]));
6219 +    *(u16 *) buf = cpu_to_le16(rh.wPortStatusPrev[map_port(wIndex)]);
6220 +    *(u16 *) (buf + 2) = cpu_to_le16(rh.wPortChange[map_port(wIndex)]);
6221 +    break;
6222 +  case SetHubFeature:
6223 +    rh_dbg("SetHubFeature\n");
6224 +  case ClearHubFeature:
6225 +    rh_dbg("ClearHubFeature\n");
6226 +    switch (wValue) {
6227 +    case C_HUB_OVER_CURRENT:
6228 +    case C_HUB_LOCAL_POWER:
6229 +      rh_warn("Not implemented hub request:%d \n", typeReq);
6230 +      /* not implemented */
6231 +      break;
6232 +    default:
6233 +      goto error;
6234 +    }
6235 +    break;
6236 +  case SetPortFeature:
6237 +    if (!wIndex || wIndex > crisv10_hcd->num_ports)
6238 +      goto error;
6239 +    if(rh_set_port_feature(map_port(wIndex), wValue))
6240 +      goto error;
6241 +    break;
6242 +  case ClearPortFeature:
6243 +    if (!wIndex || wIndex > crisv10_hcd->num_ports)
6244 +      goto error;
6245 +    if(rh_clear_port_feature(map_port(wIndex), wValue))
6246 +      goto error;
6247 +    break;
6248 +  default:
6249 +    rh_warn("Unknown hub request: %d\n", typeReq);
6250 +  error:
6251 +    retval = -EPIPE;
6252 +  }
6253 +  DBFEXIT;
6254 +  return retval;
6255 +}
6256 +
6257 +int rh_set_port_feature(__u8 bPort, __u16 wFeature) {
6258 +  __u8 bUsbCommand = 0;
6259 +  switch(wFeature) {
6260 +  case USB_PORT_FEAT_RESET:
6261 +    rh_dbg("SetPortFeature: reset\n");
6262 +    bUsbCommand |= IO_STATE(R_USB_COMMAND, port_cmd, reset);
6263 +    goto set;
6264 +    break;
6265 +  case USB_PORT_FEAT_SUSPEND:
6266 +    rh_dbg("SetPortFeature: suspend\n");
6267 +    bUsbCommand |= IO_STATE(R_USB_COMMAND, port_cmd, suspend);
6268 +    goto set;
6269 +    break;
6270 +  case USB_PORT_FEAT_POWER:
6271 +    rh_dbg("SetPortFeature: power\n");
6272 +    break;
6273 +  case USB_PORT_FEAT_C_CONNECTION:
6274 +    rh_dbg("SetPortFeature: c_connection\n");
6275 +    break;
6276 +  case USB_PORT_FEAT_C_RESET:
6277 +    rh_dbg("SetPortFeature: c_reset\n");
6278 +    break;
6279 +  case USB_PORT_FEAT_C_OVER_CURRENT:
6280 +    rh_dbg("SetPortFeature: c_over_current\n");
6281 +    break;
6282 +
6283 +  set:
6284 +    /* Select which port via the port_sel field */
6285 +    bUsbCommand |= IO_FIELD(R_USB_COMMAND, port_sel, bPort+1);
6286 +
6287 +    /* Make sure the controller isn't busy. */
6288 +    crisv10_ready_wait();
6289 +    /* Send out the actual command to the USB controller */
6290 +    *R_USB_COMMAND = bUsbCommand;
6291 +
6292 +    /* If port reset then also bring USB controller into running state */
6293 +    if(wFeature == USB_PORT_FEAT_RESET) {
6294 +      /* Wait a while for controller to first become started after port reset */
6295 +      udelay(12000); /* 12ms blocking wait */
6296 +      
6297 +      /* Make sure the controller isn't busy. */
6298 +      crisv10_ready_wait();
6299 +
6300 +      /* If all enabled ports were disabled the host controller goes down into
6301 +        started mode, so we need to bring it back into the running state.
6302 +        (This is safe even if it's already in the running state.) */
6303 +      *R_USB_COMMAND =
6304 +       IO_STATE(R_USB_COMMAND, port_sel, nop) |
6305 +       IO_STATE(R_USB_COMMAND, port_cmd, reset) |
6306 +       IO_STATE(R_USB_COMMAND, ctrl_cmd, host_run);
6307 +    }
6308 +
6309 +    break;
6310 +  default:
6311 +    rh_dbg("SetPortFeature: unknown feature\n");
6312 +    return -1;
6313 +  }
6314 +  return 0;
6315 +}
6316 +
6317 +int rh_clear_port_feature(__u8 bPort, __u16 wFeature) {
6318 +  switch(wFeature) {
6319 +  case USB_PORT_FEAT_ENABLE:
6320 +    rh_dbg("ClearPortFeature: enable\n");
6321 +    rh_disable_port(bPort);
6322 +    break;
6323 +  case USB_PORT_FEAT_SUSPEND:
6324 +    rh_dbg("ClearPortFeature: suspend\n");
6325 +    break;
6326 +  case USB_PORT_FEAT_POWER:
6327 +    rh_dbg("ClearPortFeature: power\n");
6328 +    break;
6329 +
6330 +  case USB_PORT_FEAT_C_ENABLE:
6331 +    rh_dbg("ClearPortFeature: c_enable\n");
6332 +    goto clear;
6333 +  case USB_PORT_FEAT_C_SUSPEND:
6334 +    rh_dbg("ClearPortFeature: c_suspend\n");
6335 +    goto clear;
6336 +  case USB_PORT_FEAT_C_CONNECTION:
6337 +    rh_dbg("ClearPortFeature: c_connection\n");
6338 +    goto clear;
6339 +  case USB_PORT_FEAT_C_OVER_CURRENT:
6340 +    rh_dbg("ClearPortFeature: c_over_current\n");
6341 +    goto clear;
6342 +  case USB_PORT_FEAT_C_RESET:
6343 +    rh_dbg("ClearPortFeature: c_reset\n");
6344 +    goto clear;
6345 +  clear:
6346 +    rh.wPortChange[bPort] &= ~(1 << (wFeature - 16));
6347 +    break;
6348 +  default:
6349 +    rh_dbg("ClearPortFeature: unknown feature\n");
6350 +    return -1;
6351 +  }
6352 +  return 0;
6353 +}
6354 +
6355 +
6356 +#ifdef CONFIG_PM
6357 +/* Handle a suspend request for the root hub (called from hcd_driver) */
6358 +static int rh_suspend_request(struct usb_hcd *hcd)
6359 +{
6360 +  return 0; /* no-op for now */
6361 +}
6362 +
6363 +/* Handle a resume request for the root hub (called from hcd_driver) */
6364 +static int rh_resume_request(struct usb_hcd *hcd)
6365 +{
6366 +  return 0; /* no-op for now */
6367 +}
6368 +#endif /* CONFIG_PM */
6369 +
6370 +
6371 +
6372 +/* Wrapper function for workaround port disable registers in USB controller  */
6373 +static void rh_disable_port(unsigned int port) {
6374 +  volatile int timeout = 10000;
6375 +  volatile char* usb_portx_disable;
6376 +  switch(port) {
6377 +  case 0:
6378 +    usb_portx_disable = R_USB_PORT1_DISABLE;
6379 +    break;
6380 +  case 1:
6381 +    usb_portx_disable = R_USB_PORT2_DISABLE;
6382 +    break;
6383 +  default:
6384 +    /* Invalid port index */
6385 +    return;
6386 +  }
6387 +  /* Set disable flag in special register  */
6388 +  *usb_portx_disable = IO_STATE(R_USB_PORT1_DISABLE, disable, yes);
6389 +  /* Wait until not enabled anymore */
6390 +  while((rh.wPortStatusPrev[port] &
6391 +       IO_STATE(R_USB_RH_PORT_STATUS_1, enabled, yes)) &&
6392 +       (timeout-- > 0));
6393 +  if(timeout == 0) {
6394 +    warn("Timeout while waiting for port %d to become disabled\n", port);
6395 +  }
6396 +  /* clear disable flag in special register  */
6397 +  *usb_portx_disable = IO_STATE(R_USB_PORT1_DISABLE, disable, no);
6398 +  rh_info("Physical port %d disabled\n", port+1);
6399 +}
6400 +
6401 +
6402 +/******************************************************************/
6403 +/* Transfer Controller (TC) functions                             */
6404 +/******************************************************************/
6405 +
6406 +/* FIXME: Should RX_BUF_SIZE be a config option, or maybe we should adjust it
6407 +   dynamically?
6408 +   To adjust it dynamically we would have to get an interrupt when we reach
6409 +   the end of the rx descriptor list, or when we get close to the end, and
6410 +   then allocate more descriptors. */
6411 +#define NBR_OF_RX_DESC     512
6412 +#define RX_DESC_BUF_SIZE   1024
6413 +#define RX_BUF_SIZE        (NBR_OF_RX_DESC * RX_DESC_BUF_SIZE)
6414  
6415 -       myNextRxDesc = &RxDescList[0];
6416 -       myLastRxDesc = &RxDescList[NBR_OF_RX_DESC - 1];
6417 -       myPrevRxDesc = &RxDescList[NBR_OF_RX_DESC - 1];
6418  
6419 -       *R_DMA_CH9_FIRST = virt_to_phys(myNextRxDesc);
6420 -       *R_DMA_CH9_CMD = IO_STATE(R_DMA_CH9_CMD, cmd, start);
6421 +/* Local variables for Transfer Controller */
6422 +/* --------------------------------------- */
6423  
6424 -       DBFEXIT;
6425 -}
6426 +/* This is a circular (double-linked) list of the active urbs for each epid.
6427 +   The head is never removed, and new urbs are linked onto the list as
6428 +   urb_entry_t elements. Don't reference urb_list directly; use the wrapper
6429 +   functions instead (which includes spin_locks) */
6430 +static struct list_head urb_list[NBR_OF_EPIDS];
6431  
6432 -static void init_tx_bulk_ep(void)
6433 -{
6434 -       int i;
6435 +/* Read about the need and usage of this lock in submit_ctrl_urb. */
6436 +/* Lock for URB lists for each EPID */
6437 +static spinlock_t urb_list_lock;
6438  
6439 -       DBFENTER;
6440 +/* Lock for EPID array register (R_USB_EPT_x) in Etrax */
6441 +static spinlock_t etrax_epid_lock;
6442  
6443 -       for (i = 0; i < (NBR_OF_EPIDS - 1); i++) {
6444 -               CHECK_ALIGN(&TxBulkEPList[i]);
6445 -               TxBulkEPList[i].hw_len = 0;
6446 -               TxBulkEPList[i].command = IO_FIELD(USB_EP_command, epid, i);
6447 -               TxBulkEPList[i].sub = 0;
6448 -               TxBulkEPList[i].next = virt_to_phys(&TxBulkEPList[i + 1]);
6449 -
6450 -               /* Initiate two EPs, disabled and with the eol flag set. No need for any
6451 -                  preserved epid. */
6452 -
6453 -               /* The first one has the intr flag set so we get an interrupt when the DMA
6454 -                  channel is about to become disabled. */
6455 -               CHECK_ALIGN(&TxBulkDummyEPList[i][0]);
6456 -               TxBulkDummyEPList[i][0].hw_len = 0;
6457 -               TxBulkDummyEPList[i][0].command = (IO_FIELD(USB_EP_command, epid, DUMMY_EPID) |
6458 -                                                  IO_STATE(USB_EP_command, eol, yes) |
6459 -                                                  IO_STATE(USB_EP_command, intr, yes));
6460 -               TxBulkDummyEPList[i][0].sub = 0;
6461 -               TxBulkDummyEPList[i][0].next = virt_to_phys(&TxBulkDummyEPList[i][1]);
6462 -
6463 -               /* The second one. */
6464 -               CHECK_ALIGN(&TxBulkDummyEPList[i][1]);
6465 -               TxBulkDummyEPList[i][1].hw_len = 0;
6466 -               TxBulkDummyEPList[i][1].command = (IO_FIELD(USB_EP_command, epid, DUMMY_EPID) |
6467 -                                                  IO_STATE(USB_EP_command, eol, yes));
6468 -               TxBulkDummyEPList[i][1].sub = 0;
6469 -               /* The last dummy's next pointer is the same as the current EP's next pointer. */
6470 -               TxBulkDummyEPList[i][1].next = virt_to_phys(&TxBulkEPList[i + 1]);
6471 -       }
6472 +/* Lock for dma8 sub0 handling */
6473 +static spinlock_t etrax_dma8_sub0_lock;
6474  
6475 -       /* Configure the last one. */
6476 -       CHECK_ALIGN(&TxBulkEPList[i]);
6477 -       TxBulkEPList[i].hw_len = 0;
6478 -       TxBulkEPList[i].command = (IO_STATE(USB_EP_command, eol, yes) |
6479 -                                  IO_FIELD(USB_EP_command, epid, i));
6480 -       TxBulkEPList[i].sub = 0;
6481 -       TxBulkEPList[i].next = virt_to_phys(&TxBulkEPList[0]);
6482 -
6483 -       /* No need configuring dummy EPs for the last one as it will never be used for
6484 -          bulk traffic (i == INVALD_EPID at this point). */
6485 -
6486 -       /* Set up to start on the last EP so we will enable it when inserting traffic
6487 -          for the first time (imitating the situation where the DMA has stopped
6488 -          because there was no more traffic). */
6489 -       *R_DMA_CH8_SUB0_EP = virt_to_phys(&TxBulkEPList[i]);
6490 -       /* No point in starting the bulk channel yet.
6491 -        *R_DMA_CH8_SUB0_CMD = IO_STATE(R_DMA_CH8_SUB0_CMD, cmd, start); */
6492 -       DBFEXIT;
6493 -}
6494 +/* DMA IN cache bug. Align the DMA IN buffers to 32 bytes, i.e. a cache line.
6495 +   Since RX_DESC_BUF_SIZE is 1024 is a multiple of 32, all rx buffers will be
6496 +   cache aligned. */
6497 +static volatile unsigned char RxBuf[RX_BUF_SIZE] __attribute__ ((aligned (32)));
6498 +static volatile struct USB_IN_Desc RxDescList[NBR_OF_RX_DESC] __attribute__ ((aligned (4)));
6499  
6500 -static void init_tx_ctrl_ep(void)
6501 -{
6502 -       int i;
6503 +/* Pointers into RxDescList. */
6504 +static volatile struct USB_IN_Desc *myNextRxDesc;
6505 +static volatile struct USB_IN_Desc *myLastRxDesc;
6506  
6507 -       DBFENTER;
6508 +/* A zout transfer makes a memory access at the address of its buf pointer,
6509 +   which means that setting this buf pointer to 0 will cause an access to the
6510 +   flash. In addition to this, setting sw_len to 0 results in a 16/32 bytes
6511 +   (depending on DMA burst size) transfer.
6512 +   Instead, we set it to 1, and point it to this buffer. */
6513 +static int zout_buffer[4] __attribute__ ((aligned (4)));
6514  
6515 -       for (i = 0; i < (NBR_OF_EPIDS - 1); i++) {
6516 -               CHECK_ALIGN(&TxCtrlEPList[i]);
6517 -               TxCtrlEPList[i].hw_len = 0;
6518 -               TxCtrlEPList[i].command = IO_FIELD(USB_EP_command, epid, i);
6519 -               TxCtrlEPList[i].sub = 0;
6520 -               TxCtrlEPList[i].next = virt_to_phys(&TxCtrlEPList[i + 1]);
6521 -       }
6522 +/* Cache for allocating new EP and SB descriptors. */
6523 +static kmem_cache_t *usb_desc_cache;
6524  
6525 -       CHECK_ALIGN(&TxCtrlEPList[i]);
6526 -       TxCtrlEPList[i].hw_len = 0;
6527 -       TxCtrlEPList[i].command = (IO_STATE(USB_EP_command, eol, yes) |
6528 -                                  IO_FIELD(USB_EP_command, epid, i));
6529 +/* Cache for the data allocated in the isoc descr top half. */
6530 +static kmem_cache_t *isoc_compl_cache;
6531  
6532 -       TxCtrlEPList[i].sub = 0;
6533 -       TxCtrlEPList[i].next = virt_to_phys(&TxCtrlEPList[0]);
6534 +/* Cache for the data allocated when delayed finishing of URBs */
6535 +static kmem_cache_t *later_data_cache;
6536  
6537 -       *R_DMA_CH8_SUB1_EP = virt_to_phys(&TxCtrlEPList[0]);
6538 -       *R_DMA_CH8_SUB1_CMD = IO_STATE(R_DMA_CH8_SUB1_CMD, cmd, start);
6539  
6540 -       DBFEXIT;
6541 +/* Counter to keep track of how many Isoc EPs we have set up. Used to enable
6542 +   and disable the iso_eof interrupt. We only need these interrupts when we have
6543 +   Isoc data endpoints (consumes CPU cycles).
6544 +   FIXME: This could be more fine-grained, so this interrupt is only enabled
6545 +   when we have an In Isoc URB without URB_ISO_ASAP flag queued. */
6546 +static int isoc_epid_counter;
6547 +
6548 +/* Protecting wrapper functions for R_USB_EPT_x */
6549 +/* -------------------------------------------- */
6550 +static inline void etrax_epid_set(__u8 index, __u32 data) {
6551 +  unsigned long flags;
6552 +  spin_lock_irqsave(&etrax_epid_lock, flags);
6553 +  *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, index);
6554 +  nop();
6555 +  *R_USB_EPT_DATA = data;
6556 +  spin_unlock_irqrestore(&etrax_epid_lock, flags);
6557 +}
6558 +
6559 +static inline void etrax_epid_clear_error(__u8 index) {
6560 +  unsigned long flags;
6561 +  spin_lock_irqsave(&etrax_epid_lock, flags);
6562 +  *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, index);
6563 +  nop();
6564 +  *R_USB_EPT_DATA &=
6565 +    ~(IO_MASK(R_USB_EPT_DATA, error_count_in) |
6566 +      IO_MASK(R_USB_EPT_DATA, error_count_out) |
6567 +      IO_MASK(R_USB_EPT_DATA, error_code));
6568 +  spin_unlock_irqrestore(&etrax_epid_lock, flags);
6569 +}
6570 +
6571 +static inline void etrax_epid_set_toggle(__u8 index, __u8 dirout,
6572 +                                             __u8 toggle) {
6573 +  unsigned long flags;
6574 +  spin_lock_irqsave(&etrax_epid_lock, flags);
6575 +  *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, index);
6576 +  nop();
6577 +  if(dirout) {
6578 +    *R_USB_EPT_DATA &= ~IO_MASK(R_USB_EPT_DATA, t_out);
6579 +    *R_USB_EPT_DATA |= IO_FIELD(R_USB_EPT_DATA, t_out, toggle);
6580 +  } else {
6581 +    *R_USB_EPT_DATA &= ~IO_MASK(R_USB_EPT_DATA, t_in);
6582 +    *R_USB_EPT_DATA |= IO_FIELD(R_USB_EPT_DATA, t_in, toggle);
6583 +  }
6584 +  spin_unlock_irqrestore(&etrax_epid_lock, flags);
6585 +}
6586 +
6587 +static inline __u8 etrax_epid_get_toggle(__u8 index, __u8 dirout) {
6588 +  unsigned long flags;
6589 +  __u8 toggle;
6590 +  spin_lock_irqsave(&etrax_epid_lock, flags);
6591 +  *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, index);
6592 +  nop();
6593 +  if (dirout) {
6594 +    toggle = IO_EXTRACT(R_USB_EPT_DATA, t_out, *R_USB_EPT_DATA);
6595 +  } else {
6596 +    toggle = IO_EXTRACT(R_USB_EPT_DATA, t_in, *R_USB_EPT_DATA);
6597 +  }
6598 +  spin_unlock_irqrestore(&etrax_epid_lock, flags);
6599 +  return toggle;
6600 +}
6601 +
6602 +
6603 +static inline __u32 etrax_epid_get(__u8 index) {
6604 +  unsigned long flags;
6605 +  __u32 data;
6606 +  spin_lock_irqsave(&etrax_epid_lock, flags);
6607 +  *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, index);
6608 +  nop();
6609 +  data = *R_USB_EPT_DATA;
6610 +  spin_unlock_irqrestore(&etrax_epid_lock, flags);
6611 +  return data;
6612 +}
6613 +
6614 +
6615 +
6616 +
6617 +/* Main functions for Transfer Controller */
6618 +/* -------------------------------------- */
6619 +
6620 +/* Init structs, memories and lists used by Transfer Controller */
6621 +int tc_init(struct usb_hcd *hcd) {
6622 +  int i;
6623 +  /* Clear software state info for all epids */
6624 +  memset(epid_state, 0, sizeof(struct etrax_epid) * NBR_OF_EPIDS);
6625 +
6626 +  /* Set Invalid and Dummy as being in use and disabled */
6627 +  epid_state[INVALID_EPID].inuse = 1;
6628 +  epid_state[DUMMY_EPID].inuse = 1;
6629 +  epid_state[INVALID_EPID].disabled = 1;
6630 +  epid_state[DUMMY_EPID].disabled = 1;
6631 +
6632 +  /* Clear counter for how many Isoc epids we have set up */
6633 +  isoc_epid_counter = 0;
6634 +
6635 +  /* Initialize the urb list by initiating a head for each list.
6636 +     Also reset list holding active URB for each epid */
6637 +  for (i = 0; i < NBR_OF_EPIDS; i++) {
6638 +    INIT_LIST_HEAD(&urb_list[i]);
6639 +    activeUrbList[i] = NULL;
6640 +  }
6641 +
6642 +  /* Init lock for URB lists */
6643 +  spin_lock_init(&urb_list_lock);
6644 +  /* Init lock for Etrax R_USB_EPT register */
6645 +  spin_lock_init(&etrax_epid_lock);
6646 +  /* Init lock for Etrax dma8 sub0 handling */
6647 +  spin_lock_init(&etrax_dma8_sub0_lock);
6648 +
6649 +  /* We use kmem_cache_* to make sure that all DMA desc. are dword aligned */
6650 +
6651 +  /* Note that we specify sizeof(struct USB_EP_Desc) as the size, but also
6652 +     allocate SB descriptors from this cache. This is ok since
6653 +     sizeof(struct USB_EP_Desc) == sizeof(struct USB_SB_Desc). */
6654 +  usb_desc_cache = kmem_cache_create("usb_desc_cache",
6655 +                                    sizeof(struct USB_EP_Desc), 0,
6656 +                                    SLAB_HWCACHE_ALIGN, 0, 0);
6657 +  if(usb_desc_cache == NULL) {
6658 +    return -ENOMEM;
6659 +  }
6660 +
6661 +  /* Create slab cache for speedy allocation of memory for isoc bottom-half
6662 +     interrupt handling */
6663 +  isoc_compl_cache =
6664 +    kmem_cache_create("isoc_compl_cache",
6665 +                     sizeof(struct crisv10_isoc_complete_data),
6666 +                     0, SLAB_HWCACHE_ALIGN, 0, 0);
6667 +  if(isoc_compl_cache == NULL) {
6668 +    return -ENOMEM;
6669 +  }
6670 +
6671 +  /* Create slab cache for speedy allocation of memory for later URB finish
6672 +     struct */
6673 +  later_data_cache =
6674 +    kmem_cache_create("later_data_cache",
6675 +                     sizeof(struct urb_later_data),
6676 +                     0, SLAB_HWCACHE_ALIGN, 0, 0);
6677 +  if(later_data_cache == NULL) {
6678 +    return -ENOMEM;
6679 +  }
6680 +
6681 +
6682 +  /* Initiate the bulk start timer. */
6683 +  init_timer(&bulk_start_timer);
6684 +  bulk_start_timer.expires = jiffies + BULK_START_TIMER_INTERVAL;
6685 +  bulk_start_timer.function = tc_bulk_start_timer_func;
6686 +  add_timer(&bulk_start_timer);
6687 +
6688 +
6689 +  /* Initiate the bulk eot timer. */
6690 +  init_timer(&bulk_eot_timer);
6691 +  bulk_eot_timer.expires = jiffies + BULK_EOT_TIMER_INTERVAL;
6692 +  bulk_eot_timer.function = tc_bulk_eot_timer_func;
6693 +  bulk_eot_timer.data = (unsigned long)hcd;
6694 +  add_timer(&bulk_eot_timer);
6695 +
6696 +  return 0;
6697 +}
6698 +
6699 +/* Uninitialize all resources used by Transfer Controller */
6700 +void tc_destroy(void) {
6701 +
6702 +  /* Destroy all slab cache */
6703 +  kmem_cache_destroy(usb_desc_cache);
6704 +  kmem_cache_destroy(isoc_compl_cache);
6705 +  kmem_cache_destroy(later_data_cache);
6706 +
6707 +  /* Remove timers */
6708 +  del_timer(&bulk_start_timer);
6709 +  del_timer(&bulk_eot_timer);
6710 +}
6711 +
6712 +static void restart_dma8_sub0(void) {
6713 +  unsigned long flags;
6714 +  spin_lock_irqsave(&etrax_dma8_sub0_lock, flags);
6715 +  /* Verify that the dma is not running */
6716 +  if ((*R_DMA_CH8_SUB0_CMD & IO_MASK(R_DMA_CH8_SUB0_CMD, cmd)) == 0) {
6717 +    struct USB_EP_Desc *ep = (struct USB_EP_Desc *)phys_to_virt(*R_DMA_CH8_SUB0_EP);
6718 +    while (DUMMY_EPID == IO_EXTRACT(USB_EP_command, epid, ep->command)) {
6719 +      ep = (struct USB_EP_Desc *)phys_to_virt(ep->next);
6720 +    }
6721 +    /* Advance the DMA to the next EP descriptor that is not a DUMMY_EPID.
6722 +     * ep->next is already a physical address; no need for a virt_to_phys. */
6723 +    *R_DMA_CH8_SUB0_EP = ep->next;
6724 +    /* Restart the DMA */
6725 +    *R_DMA_CH8_SUB0_CMD = IO_STATE(R_DMA_CH8_SUB0_CMD, cmd, start);
6726 +  }
6727 +  spin_unlock_irqrestore(&etrax_dma8_sub0_lock, flags);
6728 +}
6729 +
6730 +/* queue an URB with the transfer controller (called from hcd_driver) */
6731 +static int tc_urb_enqueue(struct usb_hcd *hcd, 
6732 +                         struct usb_host_endpoint *ep,
6733 +                         struct urb *urb, 
6734 +                         gfp_t mem_flags) {
6735 +  int epid;
6736 +  int retval;
6737 +  int bustime = 0;
6738 +  int maxpacket;
6739 +  unsigned long flags;
6740 +  struct crisv10_urb_priv *urb_priv;
6741 +  struct crisv10_hcd* crisv10_hcd = hcd_to_crisv10_hcd(hcd);
6742 +  DBFENTER;
6743 +
6744 +  if(!(crisv10_hcd->running)) {
6745 +    /* The USB Controller is not running, probably because no device is 
6746 +       attached. No point in enqueueing URBs then */
6747 +    tc_warn("Rejected enqueueing of URB:0x%x because no dev attached\n",
6748 +           (unsigned int)urb);
6749 +    return -ENOENT;
6750 +  }
6751 +
6752 +  maxpacket = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
6753 +  /* Special case check for In Isoc transfers. Specification states that each
6754 +     In Isoc transfer consists of one packet and therefore it should fit into
6755 +     the transfer-buffer of an URB.
6756 +     We do the check here to be sure (an invalid scenario can be produced with
6757 +     parameters to the usbtest suite) */
6758 +  if(usb_pipeisoc(urb->pipe) && usb_pipein(urb->pipe) &&
6759 +     (urb->transfer_buffer_length < maxpacket)) {
6760 +    tc_err("Submit In Isoc URB with buffer length:%d to pipe with maxpacketlen: %d\n", urb->transfer_buffer_length, maxpacket);
6761 +    return -EMSGSIZE;
6762 +  }
6763 +
6764 +  /* Check if there is enough bandwidth for periodic transfer  */
6765 +  if(usb_pipeint(urb->pipe) || usb_pipeisoc(urb->pipe)) {
6766 +    /* only check (and later claim) if not already claimed */
6767 +    if (urb->bandwidth == 0) {
6768 +      bustime = usb_check_bandwidth(urb->dev, urb);
6769 +      if (bustime < 0) {
6770 +       tc_err("Not enough periodic bandwidth\n");
6771 +       return -ENOSPC;
6772 +      }
6773 +    }
6774 +  }
6775 +
6776 +  /* Check if there is a epid for URBs destination, if not this function
6777 +     set up one. */
6778 +  epid = tc_setup_epid(ep, urb, mem_flags);
6779 +  if (epid < 0) {
6780 +    tc_err("Failed setup epid:%d for URB:0x%x\n", epid, (unsigned int)urb);
6781 +    DBFEXIT;
6782 +    return -ENOMEM;
6783 +  }
6784 +
6785 +  if(urb == activeUrbList[epid]) {
6786 +    tc_err("Resubmition of allready active URB:0x%x\n", (unsigned int)urb);
6787 +    return -ENXIO;
6788 +  }
6789 +
6790 +  if(urb_list_entry(urb, epid)) {
6791 +    tc_err("Resubmition of allready queued URB:0x%x\n", (unsigned int)urb);
6792 +    return -ENXIO;
6793 +  }
6794 +
6795 +  /* If we have actively flagged the endpoint as disabled then refuse submission */
6796 +  if(epid_state[epid].disabled) {
6797 +    return -ENOENT;
6798 +  }
6799 +
6800 +  /* Allocate and init HC-private data for URB */
6801 +  if(urb_priv_create(hcd, urb, epid, mem_flags) != 0) {
6802 +    DBFEXIT;
6803 +    return -ENOMEM;
6804 +  }
6805 +  urb_priv = urb->hcpriv;
6806 +
6807 +  tc_dbg("Enqueue URB:0x%x[%d] epid:%d (%s) bufflen:%d\n",
6808 +        (unsigned int)urb, urb_priv->urb_num, epid,
6809 +        pipe_to_str(urb->pipe), urb->transfer_buffer_length);
6810 +
6811 +  /* Create and link SBs required for this URB */
6812 +  retval = create_sb_for_urb(urb, mem_flags);
6813 +  if(retval != 0) {
6814 +    tc_err("Failed to create SBs for URB:0x%x[%d]\n", (unsigned int)urb,
6815 +          urb_priv->urb_num);
6816 +    urb_priv_free(hcd, urb);
6817 +    DBFEXIT;
6818 +    return retval;
6819 +  }
6820 +
6821 +  /* Init intr EP pool if this URB is a INTR transfer. This pool is later
6822 +     used when inserting EPs in the TxIntrEPList. We do the alloc here
6823 +     so we can't run out of memory later */
6824 +  if(usb_pipeint(urb->pipe)) {
6825 +    retval = init_intr_urb(urb, mem_flags);
6826 +    if(retval != 0) {
6827 +      tc_warn("Failed to init Intr URB\n");
6828 +      urb_priv_free(hcd, urb);
6829 +      DBFEXIT;
6830 +      return retval;
6831 +    }
6832 +  }
6833 +
6834 +  /* Disable other access when inserting USB */
6835 +  local_irq_save(flags);
6836 +
6837 +  /* Claim bandwidth, if needed */
6838 +  if(bustime) {
6839 +    usb_claim_bandwidth(urb->dev, urb, bustime, 0);
6840 +  }
6841 +  
6842 +  /* Add URB to EP queue */
6843 +  urb_list_add(urb, epid, mem_flags);
6844 +
6845 +  if(usb_pipeisoc(urb->pipe)) {
6846 +    /* Special processing of Isoc URBs. */
6847 +    tc_dma_process_isoc_urb(urb);
6848 +  } else {
6849 +    /* Process EP queue for rest of the URB types (Bulk, Ctrl, Intr) */
6850 +    tc_dma_process_queue(epid);
6851 +  }
6852 +
6853 +  local_irq_restore(flags);
6854 +
6855 +  DBFEXIT;
6856 +  return 0;
6857 +}
6858 +
6859 +/* remove an URB from the transfer controller queues (called from hcd_driver)*/
6860 +static int tc_urb_dequeue(struct usb_hcd *hcd, struct urb *urb) {
6861 +  struct crisv10_urb_priv *urb_priv;
6862 +  unsigned long flags;
6863 +  int epid;
6864 +
6865 +  DBFENTER;
6866 +  /* Disable interrupts here since a descriptor interrupt for the isoc epid
6867 +     will modify the sb list.  This could possibly be done more granular, but
6868 +     urb_dequeue should not be used frequently anyway.
6869 +  */
6870 +  local_irq_save(flags);
6871 +
6872 +  urb_priv = urb->hcpriv;
6873 +
6874 +  if (!urb_priv) {
6875 +    /* This happens if a device driver calls unlink on an urb that
6876 +       was never submitted (lazy driver) or if the urb was completed
6877 +       while dequeue was being called. */
6878 +    tc_warn("Dequeing of not enqueued URB:0x%x\n", (unsigned int)urb);
6879 +    local_irq_restore(flags);
6880 +    return 0;
6881 +  }
6882 +  epid = urb_priv->epid;
6883 +
6884 +  tc_warn("Dequeing %s URB:0x%x[%d] (%s %s epid:%d) status:%d %s\n",
6885 +         (urb == activeUrbList[epid]) ? "active" : "queued",
6886 +         (unsigned int)urb, urb_priv->urb_num, str_dir(urb->pipe),
6887 +         str_type(urb->pipe), epid, urb->status,
6888 +         (urb_priv->later_data) ? "later-sched" : "");
6889 +
6890 +  /* For Bulk, Ctrl and Intr only one URB is active at a time, so any URB
6891 +     that isn't active can be dequeued by just removing it from the queue */
6892 +  if(usb_pipebulk(urb->pipe) || usb_pipecontrol(urb->pipe) ||
6893 +     usb_pipeint(urb->pipe)) {
6894 +
6895 +    /* Check if URB hasn't gone further than the queue */
6896 +    if(urb != activeUrbList[epid]) {
6897 +      ASSERT(urb_priv->later_data == NULL);
6898 +      tc_warn("Dequeing URB:0x%x[%d] (%s %s epid:%d) from queue"
6899 +             " (not active)\n", (unsigned int)urb, urb_priv->urb_num,
6900 +             str_dir(urb->pipe), str_type(urb->pipe), epid);
6901 +      
6902 +      /* Finish the URB with error status from USB core */
6903 +      tc_finish_urb(hcd, urb, urb->status);
6904 +      local_irq_restore(flags);
6905 +      return 0;
6906 +    }
6907 +  }
6908 +
6909 +  /* Set URB status to Unlink for handling when interrupt comes. */
6910 +  urb_priv->urb_state = UNLINK;
6911 +
6912 +  /* Differentiate dequeuing of Bulk and Ctrl from Isoc and Intr */
6913 +  switch(usb_pipetype(urb->pipe)) {
6914 +  case PIPE_BULK:
6915 +    /* Check if EP still is enabled */
6916 +    if (TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
6917 +      /* The EP was enabled, disable it. */
6918 +      TxBulkEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
6919 +    }
6920 +    /* Kicking dummy list out of the party. */
6921 +    TxBulkEPList[epid].next = virt_to_phys(&TxBulkEPList[(epid + 1) % NBR_OF_EPIDS]);
6922 +    break;
6923 +  case PIPE_CONTROL:
6924 +    /* Check if EP still is enabled */
6925 +    if (TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
6926 +      /* The EP was enabled, disable it. */
6927 +      TxCtrlEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
6928 +    }
6929 +    break;
6930 +  case PIPE_ISOCHRONOUS:
6931 +    /* Disabling, busy-wait and unlinking of Isoc SBs will be done in
6932 +       finish_isoc_urb(), because there might be the case where an URB is dequeued
6933 +       but there are other valid URBs waiting */
6934 +
6935 +    /* Check if In Isoc EP still is enabled */
6936 +    if (TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
6937 +      /* The EP was enabled, disable it. */
6938 +      TxIsocEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
6939 +    }
6940 +    break;
6941 +  case PIPE_INTERRUPT:
6942 +    /* Special care is taken for interrupt URBs. EPs are unlinked in
6943 +       tc_finish_urb */
6944 +    break;
6945 +  default:
6946 +    break;
6947 +  }
6948 +
6949 +  /* Asynchronous unlink, finish the URB later from scheduled or other
6950 +     event (data finished, error) */
6951 +  tc_finish_urb_later(hcd, urb, urb->status);
6952 +
6953 +  local_irq_restore(flags);
6954 +  DBFEXIT;
6955 +  return 0;
6956 +}
6957 +
6958 +
6959 +static void tc_sync_finish_epid(struct usb_hcd *hcd, int epid) {
6960 +  volatile int timeout = 10000;
6961 +  struct urb* urb;
6962 +  struct crisv10_urb_priv* urb_priv;
6963 +  unsigned long flags;
6964 +  
6965 +  volatile struct USB_EP_Desc *first_ep;  /* First EP in the list. */
6966 +  volatile struct USB_EP_Desc *curr_ep;   /* Current EP, the iterator. */
6967 +  volatile struct USB_EP_Desc *next_ep;   /* The EP after current. */
6968 +
6969 +  int type = epid_state[epid].type;
6970 +
6971 +  /* Setting this flag will cause enqueue() to return -ENOENT for new
6972 +     submissions on this endpoint and finish_urb() won't process the queue further */
6973 +  epid_state[epid].disabled = 1;
6974 +
6975 +  switch(type) {
6976 +  case PIPE_BULK:
6977 +    /* Check if EP still is enabled */
6978 +    if (TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
6979 +      /* The EP was enabled, disable it. */
6980 +      TxBulkEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
6981 +      tc_warn("sync_finish: Disabling EP for epid:%d\n", epid);
6982 +
6983 +      /* Do busy-wait until DMA not using this EP descriptor anymore */
6984 +      while((*R_DMA_CH8_SUB0_EP ==
6985 +            virt_to_phys(&TxBulkEPList[epid])) &&
6986 +           (timeout-- > 0));
6987 +      if(timeout == 0) {
6988 +       warn("Timeout while waiting for DMA-TX-Bulk to leave EP for"
6989 +            " epid:%d\n", epid);
6990 +      }
6991 +    }
6992 +    break;
6993 +
6994 +  case PIPE_CONTROL:
6995 +    /* Check if EP still is enabled */
6996 +    if (TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
6997 +      /* The EP was enabled, disable it. */
6998 +      TxCtrlEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
6999 +      tc_warn("sync_finish: Disabling EP for epid:%d\n", epid);
7000 +
7001 +      /* Do busy-wait until DMA not using this EP descriptor anymore */
7002 +      while((*R_DMA_CH8_SUB1_EP ==
7003 +            virt_to_phys(&TxCtrlEPList[epid])) &&
7004 +           (timeout-- > 0));
7005 +      if(timeout == 0) {
7006 +       warn("Timeout while waiting for DMA-TX-Ctrl to leave EP for"
7007 +            " epid:%d\n", epid);
7008 +      }
7009 +    }
7010 +    break;
7011 +
7012 +  case PIPE_INTERRUPT:
7013 +    local_irq_save(flags);
7014 +    /* Disable all Intr EPs belonging to epid */
7015 +    first_ep = &TxIntrEPList[0];
7016 +    curr_ep = first_ep;
7017 +    do {
7018 +      next_ep = (struct USB_EP_Desc *)phys_to_virt(curr_ep->next);
7019 +      if (IO_EXTRACT(USB_EP_command, epid, next_ep->command) == epid) {
7020 +       /* Disable EP */
7021 +       next_ep->command &= ~IO_MASK(USB_EP_command, enable);
7022 +      }
7023 +      curr_ep = phys_to_virt(curr_ep->next);
7024 +    } while (curr_ep != first_ep);
7025 +
7026 +    local_irq_restore(flags);
7027 +    break;
7028 +
7029 +  case PIPE_ISOCHRONOUS:
7030 +    /* Check if EP still is enabled */
7031 +    if (TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
7032 +      tc_warn("sync_finish: Disabling Isoc EP for epid:%d\n", epid);
7033 +      /* The EP was enabled, disable it. */
7034 +      TxIsocEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
7035 +      
7036 +      while((*R_DMA_CH8_SUB3_EP == virt_to_phys(&TxIsocEPList[epid])) &&
7037 +           (timeout-- > 0));
7038 +      if(timeout == 0) {
7039 +       warn("Timeout while waiting for DMA-TX-Isoc to leave EP for"
7040 +            " epid:%d\n", epid);
7041 +      }
7042 +    }
7043 +    break;
7044 +  }
7045 +
7046 +  local_irq_save(flags);
7047 +
7048 +  /* Finish if there is active URB for this endpoint */
7049 +  if(activeUrbList[epid] != NULL) {
7050 +    urb = activeUrbList[epid];
7051 +    urb_priv = urb->hcpriv;
7052 +    ASSERT(urb_priv);
7053 +    tc_warn("Sync finish %s URB:0x%x[%d] (%s %s epid:%d) status:%d %s\n",
7054 +           (urb == activeUrbList[epid]) ? "active" : "queued",
7055 +           (unsigned int)urb, urb_priv->urb_num, str_dir(urb->pipe),
7056 +           str_type(urb->pipe), epid, urb->status,
7057 +           (urb_priv->later_data) ? "later-sched" : "");
7058 +
7059 +    tc_finish_urb(hcd, activeUrbList[epid], -ENOENT);
7060 +    ASSERT(activeUrbList[epid] == NULL);
7061 +  }
7062 +
7063 +  /* Finish any queued URBs for this endpoint. There won't be any resubmissions
7064 +     because epid_disabled causes enqueue() to fail for this endpoint */
7065 +  while((urb = urb_list_first(epid)) != NULL) {
7066 +    urb_priv = urb->hcpriv;
7067 +    ASSERT(urb_priv);
7068 +
7069 +    tc_warn("Sync finish %s URB:0x%x[%d] (%s %s epid:%d) status:%d %s\n",
7070 +           (urb == activeUrbList[epid]) ? "active" : "queued",
7071 +           (unsigned int)urb, urb_priv->urb_num, str_dir(urb->pipe),
7072 +           str_type(urb->pipe), epid, urb->status,
7073 +           (urb_priv->later_data) ? "later-sched" : "");
7074 +
7075 +    tc_finish_urb(hcd, urb, -ENOENT);
7076 +  }
7077 +  epid_state[epid].disabled = 0;
7078 +  local_irq_restore(flags);
7079 +}
7080 +
7081 +/* free resources associated with an endpoint (called from hcd_driver) */
7082 +static void tc_endpoint_disable(struct usb_hcd *hcd, 
7083 +                               struct usb_host_endpoint *ep) {
7084 +  DBFENTER;
7085 +  /* Only free epid if it has been allocated. We get two endpoint_disable
7086 +     requests for ctrl endpoints so ignore the second one */
7087 +  if(ep->hcpriv != NULL) {
7088 +    struct crisv10_ep_priv *ep_priv = ep->hcpriv;
7089 +    int epid = ep_priv->epid;
7090 +    tc_warn("endpoint_disable ep:0x%x ep-priv:0x%x (%s) (epid:%d freed)\n",
7091 +          (unsigned int)ep, (unsigned int)ep->hcpriv,
7092 +          endpoint_to_str(&(ep->desc)), epid);
7093 +
7094 +    tc_sync_finish_epid(hcd, epid);
7095 +
7096 +    ASSERT(activeUrbList[epid] == NULL);
7097 +    ASSERT(list_empty(&urb_list[epid]));
7098 +
7099 +    tc_free_epid(ep);
7100 +  } else {
7101 +    tc_dbg("endpoint_disable ep:0x%x ep-priv:0x%x (%s)\n", (unsigned int)ep,
7102 +          (unsigned int)ep->hcpriv, endpoint_to_str(&(ep->desc)));
7103 +  }
7104 +  DBFEXIT;
7105 +}
7106 +
7107 +static void tc_finish_urb_later_proc(void *data) {
7108 +  unsigned long flags;
7109 +  struct urb_later_data* uld = (struct urb_later_data*)data;
7110 +  local_irq_save(flags);
7111 +  if(uld->urb == NULL) {
7112 +    late_dbg("Later finish of URB = NULL (allready finished)\n");
7113 +  } else {
7114 +    struct crisv10_urb_priv* urb_priv = uld->urb->hcpriv;
7115 +    ASSERT(urb_priv);
7116 +    if(urb_priv->urb_num == uld->urb_num) {
7117 +      late_dbg("Later finish of URB:0x%x[%d]\n", (unsigned int)(uld->urb),
7118 +              urb_priv->urb_num);
7119 +      if(uld->status != uld->urb->status) {
7120 +       errno_dbg("Later-finish URB with status:%d, later-status:%d\n",
7121 +                 uld->urb->status, uld->status);
7122 +      }
7123 +      if(uld != urb_priv->later_data) {
7124 +       panic("Scheduled uld not same as URBs uld\n");
7125 +      }
7126 +      tc_finish_urb(uld->hcd, uld->urb, uld->status);
7127 +    } else {
7128 +      late_warn("Ignoring later finish of URB:0x%x[%d]"
7129 +               ", urb_num doesn't match current URB:0x%x[%d]",
7130 +               (unsigned int)(uld->urb), uld->urb_num,
7131 +               (unsigned int)(uld->urb), urb_priv->urb_num);
7132 +    }
7133 +  }
7134 +  local_irq_restore(flags);
7135 +  kmem_cache_free(later_data_cache, uld);
7136 +}
7137 +
7138 +static void tc_finish_urb_later(struct usb_hcd *hcd, struct urb *urb,
7139 +                               int status) {
7140 +  struct crisv10_urb_priv *urb_priv = urb->hcpriv;
7141 +  struct urb_later_data* uld;
7142 +
7143 +  ASSERT(urb_priv);
7144 +
7145 +  if(urb_priv->later_data != NULL) {
7146 +    /* Later-finish already scheduled for this URB, just update status to
7147 +       return when finishing later */
7148 +    errno_dbg("Later-finish schedule change URB status:%d with new"
7149 +             " status:%d\n", urb_priv->later_data->status, status);
7150 +    
7151 +    urb_priv->later_data->status = status;
7152 +    return;
7153 +  }
7154 +
7155 +  uld = kmem_cache_alloc(later_data_cache, SLAB_ATOMIC);
7156 +  ASSERT(uld);
7157 +
7158 +  uld->hcd = hcd;
7159 +  uld->urb = urb;
7160 +  uld->urb_num = urb_priv->urb_num;
7161 +  uld->status = status;
7162 +
7163 +  INIT_WORK(&uld->ws, tc_finish_urb_later_proc, uld);
7164 +  urb_priv->later_data = uld;
7165 +
7166 +  /* Schedule the finishing of the URB to happen later */
7167 +  schedule_delayed_work(&uld->ws, LATER_TIMER_DELAY);
7168 +}
7169 +
7170 +static void tc_finish_isoc_urb(struct usb_hcd *hcd, struct urb *urb,
7171 +                              int status);
7172 +
7173 +static void tc_finish_urb(struct usb_hcd *hcd, struct urb *urb, int status) {
7174 +  struct crisv10_hcd* crisv10_hcd = hcd_to_crisv10_hcd(hcd);
7175 +  struct crisv10_urb_priv *urb_priv = urb->hcpriv;
7176 +  int epid;
7177 +  char toggle;
7178 +  int urb_num;
7179 +
7180 +  DBFENTER;
7181 +  ASSERT(urb_priv != NULL);
7182 +  epid = urb_priv->epid;
7183 +  urb_num = urb_priv->urb_num;
7184 +
7185 +  if(urb != activeUrbList[epid]) {
7186 +    if(urb_list_entry(urb, epid)) {
7187 +      /* Remove this URB from the list. Only happens when URBs are finished
7188 +        before having been processed (dequeuing) */
7189 +      urb_list_del(urb, epid);
7190 +    } else {
7191 +      tc_warn("Finishing of URB:0x%x[%d] neither active or in queue for"
7192 +             " epid:%d\n", (unsigned int)urb, urb_num, epid);
7193 +    }
7194 +  }
7195 +
7196 +  /* Cancel any pending later-finish of this URB */
7197 +  if(urb_priv->later_data) {
7198 +    urb_priv->later_data->urb = NULL;
7199 +  }
7200 +
7201 +  /* For an IN pipe, we always set the actual length, regardless of whether
7202 +     there was an error or not (which means the device driver can use the data
7203 +     if it wants to). */
7204 +  if(usb_pipein(urb->pipe)) {
7205 +    urb->actual_length = urb_priv->rx_offset;
7206 +  } else {
7207 +    /* Set actual_length for OUT urbs also; the USB mass storage driver seems
7208 +       to want that. */
7209 +    if (status == 0 && urb->status == -EINPROGRESS) {
7210 +      urb->actual_length = urb->transfer_buffer_length;
7211 +    } else {
7212 +      /*  We wouldn't know of any partial writes if there was an error. */
7213 +      urb->actual_length = 0;
7214 +    }
7215 +  }
7216 +
7217 +
7218 +  /* URB status mangling */
7219 +  if(urb->status == -EINPROGRESS) {
7220 +    /* The USB core hasn't changed the status, let's set our finish status */
7221 +    urb->status = status;
7222 +
7223 +    if ((status == 0) && (urb->transfer_flags & URB_SHORT_NOT_OK) &&
7224 +       usb_pipein(urb->pipe) &&
7225 +       (urb->actual_length != urb->transfer_buffer_length)) {
7226 +      /* URB_SHORT_NOT_OK means that short reads (shorter than the endpoint's
7227 +        max length) is to be treated as an error. */
7228 +      errno_dbg("Finishing URB:0x%x[%d] with SHORT_NOT_OK flag and short"
7229 +               " data:%d\n", (unsigned int)urb, urb_num,
7230 +               urb->actual_length);
7231 +      urb->status = -EREMOTEIO;
7232 +    }
7233 +
7234 +    if(urb_priv->urb_state == UNLINK) {
7235 +      /* URB has been requested to be unlinked asynchronously */
7236 +      urb->status = -ECONNRESET;
7237 +      errno_dbg("Fixing unlink status of URB:0x%x[%d] to:%d\n",
7238 +               (unsigned int)urb, urb_num, urb->status);
7239 +    }
7240 +  } else {
7241 +    /* The USB Core wants to signal some error via the URB, pass it through */
7242 +  }
7243 +
7244 +  /* use completely different finish function for Isoc URBs */
7245 +  if(usb_pipeisoc(urb->pipe)) {
7246 +    tc_finish_isoc_urb(hcd, urb, status);
7247 +    return;
7248 +  }
7249 +
7250 +  /* Do special unlinking of EPs for Intr traffic */
7251 +  if(usb_pipeint(urb->pipe)) {
7252 +    tc_dma_unlink_intr_urb(urb);
7253 +  }
7254 +
7255 +  /* Release allocated bandwidth for periodic transfers */
7256 +  if(usb_pipeint(urb->pipe) || usb_pipeisoc(urb->pipe))
7257 +    usb_release_bandwidth(urb->dev, urb, 0);
7258 +
7259 +  /* This URB is active on EP */
7260 +  if(urb == activeUrbList[epid]) {
7261 +    /* We need to fiddle with the toggle bits because the hardware doesn't do
7262 +       it for us. */
7263 +    toggle = etrax_epid_get_toggle(epid, usb_pipeout(urb->pipe));
7264 +    usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
7265 +                 usb_pipeout(urb->pipe), toggle);
7266 +
7267 +    /* Checks for Ctrl and Bulk EPs */
7268 +    switch(usb_pipetype(urb->pipe)) {
7269 +    case PIPE_BULK:
7270 +      /* Check so Bulk EP really is disabled before finishing active URB  */
7271 +      ASSERT((TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) ==
7272 +            IO_STATE(USB_EP_command, enable, no));
7273 +      /* Disable sub-pointer for EP to avoid next tx_interrupt() to
7274 +        process Bulk EP. */
7275 +      TxBulkEPList[epid].sub = 0;
7276 +      /* No need to wait for the DMA before changing the next pointer.
7277 +        The modulo NBR_OF_EPIDS isn't actually necessary, since we will never use
7278 +        the last one (INVALID_EPID) for actual traffic. */
7279 +      TxBulkEPList[epid].next = 
7280 +       virt_to_phys(&TxBulkEPList[(epid + 1) % NBR_OF_EPIDS]);
7281 +      break;
7282 +    case PIPE_CONTROL:
7283 +      /* Check so Ctrl EP really is disabled before finishing active URB  */
7284 +      ASSERT((TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)) ==
7285 +            IO_STATE(USB_EP_command, enable, no));
7286 +      /* Disable sub-pointer for EP to avoid next tx_interrupt() to
7287 +        process Ctrl EP. */
7288 +      TxCtrlEPList[epid].sub = 0;
7289 +      break;
7290 +    }
7291 +  }
7292 +
7293 +  /* Free HC-private URB data*/
7294 +  urb_priv_free(hcd, urb);
7295 +
7296 +  if(urb->status) {
7297 +    errno_dbg("finish_urb (URB:0x%x[%d] %s %s) (data:%d) status:%d\n",
7298 +             (unsigned int)urb, urb_num, str_dir(urb->pipe),
7299 +             str_type(urb->pipe), urb->actual_length, urb->status);
7300 +  } else {
7301 +    tc_dbg("finish_urb (URB:0x%x[%d] %s %s) (data:%d) status:%d\n",
7302 +          (unsigned int)urb, urb_num, str_dir(urb->pipe),
7303 +          str_type(urb->pipe), urb->actual_length, urb->status);
7304 +  }
7305 +
7306 +  /* If we just finished an active URB, clear active pointer. */
7307 +  if (urb == activeUrbList[epid]) {
7308 +    /* Make URB not active on EP anymore */
7309 +    activeUrbList[epid] = NULL;
7310 +
7311 +    if(urb->status == 0) {
7312 +      /* URB finished successfully, process queue to see if there are any more
7313 +        URBs waiting before we call completion function.*/
7314 +      if(crisv10_hcd->running) {
7315 +       /* Only process queue if USB controller is running */
7316 +       tc_dma_process_queue(epid);
7317 +      } else {
7318 +       tc_warn("No processing of queue for epid:%d, USB Controller not"
7319 +               " running\n", epid);
7320 +      }
7321 +    }
7322 +  }
7323 +
7324 +  /*  Hand the URB from HCD to its USB device driver, using its completion
7325 +      functions */
7326 +  usb_hcd_giveback_urb (hcd, urb);
7327 +
7328 +  /* Check the queue once more if the URB returned with error, because we
7329 +     didn't do it before the completion function because the specification
7330 +     states that the queue should not restart until all its unlinked
7331 +     URBs have been fully retired, with the completion functions run */
7332 +  if(crisv10_hcd->running) {
7333 +    /* Only process queue if USB controller is running */
7334 +    tc_dma_process_queue(epid);
7335 +  } else {
7336 +    tc_warn("No processing of queue for epid:%d, USB Controller not running\n",
7337 +           epid);
7338 +  }
7339 +
7340 +  DBFEXIT;
7341 +}
7342 +
7343 +static void tc_finish_isoc_urb(struct usb_hcd *hcd, struct urb *urb,
7344 +                              int status) {
7345 +  struct crisv10_urb_priv *urb_priv = urb->hcpriv;
7346 +  int epid, i;
7347 +  volatile int timeout = 10000;
7348 +
7349 +  ASSERT(urb_priv);
7350 +  epid = urb_priv->epid;
7351 +
7352 +  ASSERT(usb_pipeisoc(urb->pipe));
7353 +
7354 +  /* Set that all isoc packets have status and length set before
7355 +     completing the urb. */
7356 +  for (i = urb_priv->isoc_packet_counter; i < urb->number_of_packets; i++){
7357 +    urb->iso_frame_desc[i].actual_length = 0;
7358 +    urb->iso_frame_desc[i].status = -EPROTO;
7359 +  }
7360 +
7361 +  /* Check if the URB is currently active (done or error) */
7362 +  if(urb == activeUrbList[epid]) {
7363 +    /* Check if there are another In Isoc URB queued for this epid */
7364 +    if (!list_empty(&urb_list[epid])&& !epid_state[epid].disabled) {
7365 +      /* Move it from queue to active and mark it started so Isoc transfers
7366 +        won't be interrupted.
7367 +        All Isoc URBs data transfers are already added to DMA lists so we
7368 +        don't have to insert anything in DMA lists here. */
7369 +      activeUrbList[epid] = urb_list_first(epid);
7370 +      ((struct crisv10_urb_priv *)(activeUrbList[epid]->hcpriv))->urb_state =
7371 +       STARTED;
7372 +      urb_list_del(activeUrbList[epid], epid);
7373 +
7374 +      if(urb->status) {
7375 +       errno_dbg("finish_isoc_urb (URB:0x%x[%d] %s %s) (%d of %d packets)"
7376 +                 " status:%d, new waiting URB:0x%x[%d]\n",
7377 +                 (unsigned int)urb, urb_priv->urb_num, str_dir(urb->pipe),
7378 +                 str_type(urb->pipe), urb_priv->isoc_packet_counter,
7379 +                 urb->number_of_packets, urb->status,
7380 +                 (unsigned int)activeUrbList[epid],
7381 +                 ((struct crisv10_urb_priv *)(activeUrbList[epid]->hcpriv))->urb_num);
7382 +      }
7383 +
7384 +    } else { /* No other URB queued for this epid */
7385 +      if(urb->status) {
7386 +       errno_dbg("finish_isoc_urb (URB:0x%x[%d] %s %s) (%d of %d packets)"
7387 +                 " status:%d, no new URB waiting\n",
7388 +                 (unsigned int)urb, urb_priv->urb_num, str_dir(urb->pipe),
7389 +                 str_type(urb->pipe), urb_priv->isoc_packet_counter,
7390 +                 urb->number_of_packets, urb->status);
7391 +      }
7392 +
7393 +      /* Check if EP is still enabled, then shut it down. */
7394 +      if (TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
7395 +       isoc_dbg("Isoc EP enabled for epid:%d, disabling it\n", epid);
7396 +
7397 +       /* Should only occur for In Isoc EPs where SB isn't consumed. */
7398 +       ASSERT(usb_pipein(urb->pipe));
7399 +
7400 +       /* Disable it and wait for it to stop */
7401 +       TxIsocEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
7402 +       
7403 +       /* Ah, the luxury of busy-wait. */
7404 +       while((*R_DMA_CH8_SUB3_EP == virt_to_phys(&TxIsocEPList[epid])) &&
7405 +             (timeout-- > 0));
7406 +       if(timeout == 0) {
7407 +         warn("Timeout while waiting for DMA-TX-Isoc to leave EP for epid:%d\n", epid);
7408 +       }
7409 +      }
7410 +
7411 +      /* Unlink SB to say that epid is finished. */
7412 +      TxIsocEPList[epid].sub = 0;
7413 +      TxIsocEPList[epid].hw_len = 0;
7414 +
7415 +      /* No URB active for EP anymore */
7416 +      activeUrbList[epid] = NULL;
7417 +    }
7418 +  } else { /* Finishing of not active URB (queued up with SBs though) */
7419 +    isoc_warn("finish_isoc_urb (URB:0x%x %s) (%d of %d packets) status:%d,"
7420 +             " SB queued but not active\n",
7421 +             (unsigned int)urb, str_dir(urb->pipe),
7422 +             urb_priv->isoc_packet_counter, urb->number_of_packets,
7423 +             urb->status);
7424 +    if(usb_pipeout(urb->pipe)) {
7425 +      /* Finishing of not yet active Out Isoc URB needs unlinking of SBs. */
7426 +      struct USB_SB_Desc *iter_sb, *prev_sb, *next_sb;
7427 +
7428 +      iter_sb = TxIsocEPList[epid].sub ?
7429 +       phys_to_virt(TxIsocEPList[epid].sub) : 0;
7430 +      prev_sb = 0;
7431 +
7432 +      /* SB that is linked before this URBs first SB */
7433 +      while (iter_sb && (iter_sb != urb_priv->first_sb)) {
7434 +       prev_sb = iter_sb;
7435 +       iter_sb = iter_sb->next ? phys_to_virt(iter_sb->next) : 0;
7436 +      }
7437 +
7438 +      if (iter_sb == 0) {
7439 +       /* Unlink of the URB currently being transmitted. */
7440 +       prev_sb = 0;
7441 +       iter_sb = TxIsocEPList[epid].sub ? phys_to_virt(TxIsocEPList[epid].sub) : 0;
7442 +      }
7443 +
7444 +      while (iter_sb && (iter_sb != urb_priv->last_sb)) {
7445 +       iter_sb = iter_sb->next ? phys_to_virt(iter_sb->next) : 0;
7446 +      }
7447 +
7448 +      if (iter_sb) {
7449 +       next_sb = iter_sb->next ? phys_to_virt(iter_sb->next) : 0;
7450 +      } else {
7451 +       /* This should only happen if the DMA has completed
7452 +          processing the SB list for this EP while interrupts
7453 +          are disabled. */
7454 +       isoc_dbg("Isoc urb not found, already sent?\n");
7455 +       next_sb = 0;
7456 +      }
7457 +      if (prev_sb) {
7458 +       prev_sb->next = next_sb ? virt_to_phys(next_sb) : 0;
7459 +      } else {
7460 +       TxIsocEPList[epid].sub = next_sb ? virt_to_phys(next_sb) : 0;
7461 +      }
7462 +    }
7463 +  }
7464 +
7465 +  /* Free HC-private URB data*/
7466 +  urb_priv_free(hcd, urb);
7467 +
7468 +  usb_release_bandwidth(urb->dev, urb, 0);
7469 +
7470 +  /*  Hand the URB from HCD to its USB device driver, using its completion
7471 +      functions */
7472 +  usb_hcd_giveback_urb (hcd, urb);
7473 +}
7474 +
7475 +static __u32 urb_num = 0;
7476 +
7477 +/* allocate and initialize URB private data */
7478 +static int urb_priv_create(struct usb_hcd *hcd, struct urb *urb, int epid,
7479 +                          int mem_flags) {
7480 +  struct crisv10_urb_priv *urb_priv;
7481 +  
7482 +  urb_priv = kmalloc(sizeof *urb_priv, mem_flags);
7483 +  if (!urb_priv)
7484 +    return -ENOMEM;
7485 +  memset(urb_priv, 0, sizeof *urb_priv);
7486 +
7487 +  urb_priv->epid = epid;
7488 +  urb_priv->urb_state = NOT_STARTED;
7489 +
7490 +  urb->hcpriv = urb_priv;
7491 +  /* Assign URB a sequence number, and increment counter */
7492 +  urb_priv->urb_num = urb_num;
7493 +  urb_num++;
7494 +  return 0;
7495 +}
7496 +
7497 +/* free URB private data */
7498 +static void urb_priv_free(struct usb_hcd *hcd, struct urb *urb) {
7499 +  int i;
7500 +  struct crisv10_urb_priv *urb_priv = urb->hcpriv;
7501 +  ASSERT(urb_priv != 0);
7502 +
7503 +  /* Check it has any SBs linked that needs to be freed*/
7504 +  if(urb_priv->first_sb != NULL) {
7505 +    struct USB_SB_Desc *next_sb, *first_sb, *last_sb;
7506 +    int i = 0;
7507 +    first_sb = urb_priv->first_sb;
7508 +    last_sb = urb_priv->last_sb;
7509 +    ASSERT(last_sb);
7510 +    while(first_sb != last_sb) {
7511 +      next_sb = (struct USB_SB_Desc *)phys_to_virt(first_sb->next);
7512 +      kmem_cache_free(usb_desc_cache, first_sb);
7513 +      first_sb = next_sb;
7514 +      i++;
7515 +    }
7516 +    kmem_cache_free(usb_desc_cache, last_sb);
7517 +    i++;
7518 +  }
7519 +
7520 +  /* Check if it has any EPs in its Intr pool that also needs to be freed */
7521 +  if(urb_priv->intr_ep_pool_length > 0) {
7522 +    for(i = 0; i < urb_priv->intr_ep_pool_length; i++) {
7523 +      kfree(urb_priv->intr_ep_pool[i]);
7524 +    }
7525 +    /*
7526 +    tc_dbg("Freed %d EPs from URB:0x%x EP pool\n",
7527 +            urb_priv->intr_ep_pool_length, (unsigned int)urb);
7528 +    */
7529 +  }
7530 +
7531 +  kfree(urb_priv);
7532 +  urb->hcpriv = NULL;
7533 +}
7534 +
7535 +static int ep_priv_create(struct usb_host_endpoint *ep, int mem_flags) {
7536 +  struct crisv10_ep_priv *ep_priv;
7537 +  
7538 +  ep_priv = kmalloc(sizeof *ep_priv, mem_flags);
7539 +  if (!ep_priv)
7540 +    return -ENOMEM;
7541 +  memset(ep_priv, 0, sizeof *ep_priv);
7542 +
7543 +  ep->hcpriv = ep_priv;
7544 +  return 0;
7545 +}
7546 +
7547 +static void ep_priv_free(struct usb_host_endpoint *ep) {
7548 +  struct crisv10_ep_priv *ep_priv = ep->hcpriv;
7549 +  ASSERT(ep_priv);
7550 +  kfree(ep_priv);
7551 +  ep->hcpriv = NULL;
7552 +}
7553 +
7554 +/* EPID handling functions, managing EP-list in Etrax through wrappers */
7555 +/* ------------------------------------------------------------------- */
7556 +
7557 +/* Sets up a new EPID for an endpoint or returns existing if found */
7558 +static int tc_setup_epid(struct usb_host_endpoint *ep, struct urb *urb,
7559 +                        int mem_flags) {
7560 +  int epid;
7561 +  char devnum, endpoint, out_traffic, slow;
7562 +  int maxlen;
7563 +  __u32 epid_data;
7564 +  struct crisv10_ep_priv *ep_priv = ep->hcpriv;
7565 +  
7566 +  DBFENTER;
7567 +  
7568 +  /* Check if a valid epid already is setup for this endpoint */
7569 +  if(ep_priv != NULL) {
7570 +    return ep_priv->epid;
7571 +  }
7572 +
7573 +  /* We must find and initiate a new epid for this urb. */
7574 +  epid = tc_allocate_epid();
7575 +  
7576 +  if (epid == -1) {
7577 +    /* Failed to allocate a new epid. */
7578 +    DBFEXIT;
7579 +    return epid;
7580 +  }
7581 +  
7582 +  /* We now have a new epid to use. Claim it. */
7583 +  epid_state[epid].inuse = 1;
7584 +  
7585 +  /* Init private data for new endpoint */
7586 +  if(ep_priv_create(ep, mem_flags) != 0) {
7587 +    return -ENOMEM;
7588 +  }
7589 +  ep_priv = ep->hcpriv;
7590 +  ep_priv->epid = epid;
7591 +
7592 +  devnum = usb_pipedevice(urb->pipe);
7593 +  endpoint = usb_pipeendpoint(urb->pipe);
7594 +  slow = (urb->dev->speed == USB_SPEED_LOW);
7595 +  maxlen = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
7596 +
7597 +  if (usb_pipetype(urb->pipe) == PIPE_CONTROL) {
7598 +    /* We want both IN and OUT control traffic to be put on the same
7599 +       EP/SB list. */
7600 +    out_traffic = 1;
7601 +  } else {
7602 +    out_traffic = usb_pipeout(urb->pipe);
7603 +  }
7604 +    
7605 +  if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
7606 +    epid_data = IO_STATE(R_USB_EPT_DATA_ISO, valid, yes) |
7607 +      /* FIXME: Change any to the actual port? */
7608 +      IO_STATE(R_USB_EPT_DATA_ISO, port, any) |
7609 +      IO_FIELD(R_USB_EPT_DATA_ISO, max_len, maxlen) |
7610 +      IO_FIELD(R_USB_EPT_DATA_ISO, ep, endpoint) |
7611 +      IO_FIELD(R_USB_EPT_DATA_ISO, dev, devnum);
7612 +    etrax_epid_iso_set(epid, epid_data);
7613 +  } else {
7614 +    epid_data = IO_STATE(R_USB_EPT_DATA, valid, yes) |
7615 +      IO_FIELD(R_USB_EPT_DATA, low_speed, slow) |
7616 +      /* FIXME: Change any to the actual port? */
7617 +      IO_STATE(R_USB_EPT_DATA, port, any) |
7618 +      IO_FIELD(R_USB_EPT_DATA, max_len, maxlen) |
7619 +      IO_FIELD(R_USB_EPT_DATA, ep, endpoint) |
7620 +      IO_FIELD(R_USB_EPT_DATA, dev, devnum);
7621 +    etrax_epid_set(epid, epid_data);
7622 +  }
7623 +  
7624 +  epid_state[epid].out_traffic = out_traffic;
7625 +  epid_state[epid].type = usb_pipetype(urb->pipe);
7626 +
7627 +  tc_warn("Setting up ep:0x%x epid:%d (addr:%d endp:%d max_len:%d %s %s %s)\n",
7628 +         (unsigned int)ep, epid, devnum, endpoint, maxlen,
7629 +         str_type(urb->pipe), out_traffic ? "out" : "in",
7630 +         slow ? "low" : "full");
7631 +
7632 +  /* Enable Isoc eof interrupt if we set up the first Isoc epid */
7633 +  if(usb_pipeisoc(urb->pipe)) {
7634 +    isoc_epid_counter++;
7635 +    if(isoc_epid_counter == 1) {
7636 +      isoc_warn("Enabled Isoc eof interrupt\n");
7637 +      *R_USB_IRQ_MASK_SET |= IO_STATE(R_USB_IRQ_MASK_SET, iso_eof, set);
7638 +    }
7639 +  }
7640 +
7641 +  DBFEXIT;
7642 +  return epid;
7643 +}
7644 +
7645 +static void tc_free_epid(struct usb_host_endpoint *ep) {
7646 +  unsigned long flags;
7647 +  struct crisv10_ep_priv *ep_priv = ep->hcpriv;
7648 +  int epid;
7649 +  volatile int timeout = 10000;
7650 +
7651 +  DBFENTER;
7652 +
7653 +  if (ep_priv == NULL) {
7654 +    tc_warn("Trying to free unused epid on ep:0x%x\n", (unsigned int)ep);
7655 +    DBFEXIT;
7656 +    return;
7657 +  }
7658 +
7659 +  epid = ep_priv->epid;
7660 +
7661 +  /* Disable Isoc eof interrupt if we free the last Isoc epid */
7662 +  if(epid_isoc(epid)) {
7663 +    ASSERT(isoc_epid_counter > 0);
7664 +    isoc_epid_counter--;
7665 +    if(isoc_epid_counter == 0) {
7666 +      *R_USB_IRQ_MASK_SET &= ~IO_STATE(R_USB_IRQ_MASK_SET, iso_eof, set);
7667 +      isoc_warn("Disabled Isoc eof interrupt\n");
7668 +    }
7669 +  }
7670 +
7671 +  /* Take lock manually instead of in epid_x_x wrappers,
7672 +     because we need to be polling here */
7673 +  spin_lock_irqsave(&etrax_epid_lock, flags);
7674 +  
7675 +  *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
7676 +  nop();
7677 +  while((*R_USB_EPT_DATA & IO_MASK(R_USB_EPT_DATA, hold)) &&
7678 +       (timeout-- > 0));
7679 +  if(timeout == 0) {
7680 +    warn("Timeout while waiting for epid:%d to drop hold\n", epid);
7681 +  }
7682 +  /* This will, among other things, set the valid field to 0. */
7683 +  *R_USB_EPT_DATA = 0;
7684 +  spin_unlock_irqrestore(&etrax_epid_lock, flags);
7685 +  
7686 +  /* Free resource in software state info list */
7687 +  epid_state[epid].inuse = 0;
7688 +
7689 +  /* Free private endpoint data */
7690 +  ep_priv_free(ep);
7691 +  
7692 +  DBFEXIT;
7693 +}
7694 +
7695 +static int tc_allocate_epid(void) {
7696 +  int i;
7697 +  DBFENTER;
7698 +  for (i = 0; i < NBR_OF_EPIDS; i++) {
7699 +    if (!epid_inuse(i)) {
7700 +      DBFEXIT;
7701 +      return i;
7702 +    }
7703 +  }
7704 +  
7705 +  tc_warn("Found no free epids\n");
7706 +  DBFEXIT;
7707 +  return -1;
7708  }
7709  
7710  
7711 -static void init_tx_intr_ep(void)
7712 -{
7713 -       int i;
7714 +/* Wrappers around the list functions (include/linux/list.h). */
7715 +/* ---------------------------------------------------------- */
7716 +static inline int __urb_list_empty(int epid) {
7717 +  int retval;
7718 +  retval = list_empty(&urb_list[epid]);
7719 +  return retval;
7720 +}
7721  
7722 -       DBFENTER;
7723 +/* Returns first urb for this epid, or NULL if list is empty. */
7724 +static inline struct urb *urb_list_first(int epid) {
7725 +  unsigned long flags;
7726 +  struct urb *first_urb = 0;
7727 +  spin_lock_irqsave(&urb_list_lock, flags);
7728 +  if (!__urb_list_empty(epid)) {
7729 +    /* Get the first urb (i.e. head->next). */
7730 +    urb_entry_t *urb_entry = list_entry((&urb_list[epid])->next, urb_entry_t, list);
7731 +    first_urb = urb_entry->urb;
7732 +  }
7733 +  spin_unlock_irqrestore(&urb_list_lock, flags);
7734 +  return first_urb;
7735 +}
7736  
7737 -       /* Read comment at zout_buffer declaration for an explanation to this. */
7738 -       TxIntrSB_zout.sw_len = 1;
7739 -       TxIntrSB_zout.next = 0;
7740 -       TxIntrSB_zout.buf = virt_to_phys(&zout_buffer[0]);
7741 -       TxIntrSB_zout.command = (IO_FIELD(USB_SB_command, rem, 0) |
7742 -                                IO_STATE(USB_SB_command, tt, zout) |
7743 -                                IO_STATE(USB_SB_command, full, yes) |
7744 -                                IO_STATE(USB_SB_command, eot, yes) |
7745 -                                IO_STATE(USB_SB_command, eol, yes));
7746 -
7747 -       for (i = 0; i < (MAX_INTR_INTERVAL - 1); i++) {
7748 -               CHECK_ALIGN(&TxIntrEPList[i]);
7749 -               TxIntrEPList[i].hw_len = 0;
7750 -               TxIntrEPList[i].command =
7751 -                       (IO_STATE(USB_EP_command, eof, yes) |
7752 -                        IO_STATE(USB_EP_command, enable, yes) |
7753 -                        IO_FIELD(USB_EP_command, epid, INVALID_EPID));
7754 -               TxIntrEPList[i].sub = virt_to_phys(&TxIntrSB_zout);
7755 -               TxIntrEPList[i].next = virt_to_phys(&TxIntrEPList[i + 1]);
7756 -       }
7757 +/* Adds an urb_entry last in the list for this epid. */
7758 +static inline void urb_list_add(struct urb *urb, int epid, int mem_flags) {
7759 +  unsigned long flags;
7760 +  urb_entry_t *urb_entry = (urb_entry_t *)kmalloc(sizeof(urb_entry_t), mem_flags);
7761 +  ASSERT(urb_entry);
7762 +  
7763 +  urb_entry->urb = urb;
7764 +  spin_lock_irqsave(&urb_list_lock, flags);
7765 +  list_add_tail(&urb_entry->list, &urb_list[epid]);
7766 +  spin_unlock_irqrestore(&urb_list_lock, flags);
7767 +}
7768  
7769 -       CHECK_ALIGN(&TxIntrEPList[i]);
7770 -       TxIntrEPList[i].hw_len = 0;
7771 -       TxIntrEPList[i].command =
7772 -               (IO_STATE(USB_EP_command, eof, yes) |
7773 -                IO_STATE(USB_EP_command, eol, yes) |
7774 -                IO_STATE(USB_EP_command, enable, yes) |
7775 -                IO_FIELD(USB_EP_command, epid, INVALID_EPID));
7776 -       TxIntrEPList[i].sub = virt_to_phys(&TxIntrSB_zout);
7777 -       TxIntrEPList[i].next = virt_to_phys(&TxIntrEPList[0]);
7778 -
7779 -       *R_DMA_CH8_SUB2_EP = virt_to_phys(&TxIntrEPList[0]);
7780 -       *R_DMA_CH8_SUB2_CMD = IO_STATE(R_DMA_CH8_SUB2_CMD, cmd, start);
7781 -       DBFEXIT;
7782 +/* Search through the list for an element that contains this urb. (The list
7783 +   is expected to be short and the one we are about to delete will often be
7784 +   the first in the list.)
7785 +   Should be protected by spin_locks in calling function */
7786 +static inline urb_entry_t *__urb_list_entry(struct urb *urb, int epid) {
7787 +  struct list_head *entry;
7788 +  struct list_head *tmp;
7789 +  urb_entry_t *urb_entry;
7790 +  
7791 +  list_for_each_safe(entry, tmp, &urb_list[epid]) {
7792 +    urb_entry = list_entry(entry, urb_entry_t, list);
7793 +    ASSERT(urb_entry);
7794 +    ASSERT(urb_entry->urb);
7795 +    
7796 +    if (urb_entry->urb == urb) {
7797 +      return urb_entry;
7798 +    }
7799 +  }
7800 +  return 0;
7801 +}
7802 +
7803 +/* Same function as above but for global use. Protects list by spinlock */
7804 +static inline urb_entry_t *urb_list_entry(struct urb *urb, int epid) {
7805 +  unsigned long flags;
7806 +  urb_entry_t *urb_entry;
7807 +  spin_lock_irqsave(&urb_list_lock, flags);
7808 +  urb_entry = __urb_list_entry(urb, epid);
7809 +  spin_unlock_irqrestore(&urb_list_lock, flags);
7810 +  return (urb_entry);
7811  }
7812  
7813 -static void init_tx_isoc_ep(void)
7814 -{
7815 -       int i;
7816 +/* Delete an urb from the list. */
7817 +static inline void urb_list_del(struct urb *urb, int epid) {
7818 +  unsigned long flags;
7819 +  urb_entry_t *urb_entry;
7820 +
7821 +  /* Delete entry and free. */
7822 +  spin_lock_irqsave(&urb_list_lock, flags);
7823 +  urb_entry = __urb_list_entry(urb, epid);
7824 +  ASSERT(urb_entry);
7825 +
7826 +  list_del(&urb_entry->list);
7827 +  spin_unlock_irqrestore(&urb_list_lock, flags);
7828 +  kfree(urb_entry);
7829 +}
7830  
7831 -       DBFENTER;
7832 +/* Move an urb to the end of the list. */
7833 +static inline void urb_list_move_last(struct urb *urb, int epid) {
7834 +  unsigned long flags;
7835 +  urb_entry_t *urb_entry;
7836 +  
7837 +  spin_lock_irqsave(&urb_list_lock, flags);
7838 +  urb_entry = __urb_list_entry(urb, epid);
7839 +  ASSERT(urb_entry);
7840 +
7841 +  list_del(&urb_entry->list);
7842 +  list_add_tail(&urb_entry->list, &urb_list[epid]);
7843 +  spin_unlock_irqrestore(&urb_list_lock, flags);
7844 +}
7845  
7846 -       /* Read comment at zout_buffer declaration for an explanation to this. */
7847 -       TxIsocSB_zout.sw_len = 1;
7848 -       TxIsocSB_zout.next = 0;
7849 -       TxIsocSB_zout.buf = virt_to_phys(&zout_buffer[0]);
7850 -       TxIsocSB_zout.command = (IO_FIELD(USB_SB_command, rem, 0) |
7851 -                                IO_STATE(USB_SB_command, tt, zout) |
7852 -                                IO_STATE(USB_SB_command, full, yes) |
7853 -                                IO_STATE(USB_SB_command, eot, yes) |
7854 -                                IO_STATE(USB_SB_command, eol, yes));
7855 -
7856 -       /* The last isochronous EP descriptor is a dummy. */
7857 -
7858 -       for (i = 0; i < (NBR_OF_EPIDS - 1); i++) {
7859 -               CHECK_ALIGN(&TxIsocEPList[i]);
7860 -               TxIsocEPList[i].hw_len = 0;
7861 -               TxIsocEPList[i].command = IO_FIELD(USB_EP_command, epid, i);
7862 -               TxIsocEPList[i].sub = 0;
7863 -               TxIsocEPList[i].next = virt_to_phys(&TxIsocEPList[i + 1]);
7864 +/* Get the next urb in the list. */
7865 +static inline struct urb *urb_list_next(struct urb *urb, int epid) {
7866 +  unsigned long flags;
7867 +  urb_entry_t *urb_entry;
7868 +
7869 +  spin_lock_irqsave(&urb_list_lock, flags);
7870 +  urb_entry = __urb_list_entry(urb, epid);
7871 +  ASSERT(urb_entry);
7872 +
7873 +  if (urb_entry->list.next != &urb_list[epid]) {
7874 +    struct list_head *elem = urb_entry->list.next;
7875 +    urb_entry = list_entry(elem, urb_entry_t, list);
7876 +    spin_unlock_irqrestore(&urb_list_lock, flags);
7877 +    return urb_entry->urb;
7878 +  } else {
7879 +    spin_unlock_irqrestore(&urb_list_lock, flags);
7880 +    return NULL;
7881 +  }
7882 +}
7883 +
7884 +struct USB_EP_Desc* create_ep(int epid, struct USB_SB_Desc* sb_desc,
7885 +                             int mem_flags) {
7886 +  struct USB_EP_Desc *ep_desc;
7887 +  ep_desc = (struct USB_EP_Desc *) kmem_cache_alloc(usb_desc_cache, mem_flags);
7888 +  if(ep_desc == NULL)
7889 +    return NULL;
7890 +  memset(ep_desc, 0, sizeof(struct USB_EP_Desc));
7891 +
7892 +  ep_desc->hw_len = 0;
7893 +  ep_desc->command = (IO_FIELD(USB_EP_command, epid, epid) |
7894 +                     IO_STATE(USB_EP_command, enable, yes));
7895 +  if(sb_desc == NULL) {
7896 +    ep_desc->sub = 0;
7897 +  } else {
7898 +    ep_desc->sub = virt_to_phys(sb_desc);
7899 +  }
7900 +  return ep_desc;
7901 +}
7902 +
7903 +#define TT_ZOUT  0
7904 +#define TT_IN    1
7905 +#define TT_OUT   2
7906 +#define TT_SETUP 3
7907 +
7908 +#define CMD_EOL  IO_STATE(USB_SB_command, eol, yes)
7909 +#define CMD_INTR IO_STATE(USB_SB_command, intr, yes)
7910 +#define CMD_FULL IO_STATE(USB_SB_command, full, yes)
7911 +
7912 +/* Allocation and setup of a generic SB. Used to create SETUP, OUT and ZOUT
7913 +   SBs. Also used by create_sb_in() to avoid same allocation procedure at two
7914 +   places */
7915 +struct USB_SB_Desc* create_sb(struct USB_SB_Desc* sb_prev, int tt, void* data,
7916 +                             int datalen, int mem_flags) {
7917 +  struct USB_SB_Desc *sb_desc;
7918 +  sb_desc = (struct USB_SB_Desc*)kmem_cache_alloc(usb_desc_cache, mem_flags);
7919 +  if(sb_desc == NULL)
7920 +    return NULL;
7921 +  memset(sb_desc, 0, sizeof(struct USB_SB_Desc));
7922 +
7923 +  sb_desc->command = IO_FIELD(USB_SB_command, tt, tt) |
7924 +                     IO_STATE(USB_SB_command, eot, yes);
7925 +
7926 +  sb_desc->sw_len = datalen;
7927 +  if(data != NULL) {
7928 +    sb_desc->buf = virt_to_phys(data);
7929 +  } else {
7930 +    sb_desc->buf = 0;
7931 +  }
7932 +  if(sb_prev != NULL) {
7933 +    sb_prev->next = virt_to_phys(sb_desc);
7934 +  }
7935 +  return sb_desc;
7936 +}
7937 +
7938 +/* Creates a copy of an existing SB by allocation space for it and copy
7939 +   settings */
7940 +struct USB_SB_Desc* create_sb_copy(struct USB_SB_Desc* sb_orig, int mem_flags) {
7941 +  struct USB_SB_Desc *sb_desc;
7942 +  sb_desc = (struct USB_SB_Desc*)kmem_cache_alloc(usb_desc_cache, mem_flags);
7943 +  if(sb_desc == NULL)
7944 +    return NULL;
7945 +
7946 +  memcpy(sb_desc, sb_orig, sizeof(struct USB_SB_Desc));
7947 +  return sb_desc;
7948 +}
7949 +
7950 +/* A specific create_sb function for creation of in SBs. This is due to
7951 +   that datalen in In SBs shows how many packets we are expecting. It also
7952 +   sets up the rem field to show if how many bytes we expect in last packet
7953 +   if it's not a full one */
7954 +struct USB_SB_Desc* create_sb_in(struct USB_SB_Desc* sb_prev, int datalen,
7955 +                                int maxlen, int mem_flags) {
7956 +  struct USB_SB_Desc *sb_desc;
7957 +  sb_desc = create_sb(sb_prev, TT_IN, NULL,
7958 +                     datalen ? (datalen - 1) / maxlen + 1 : 0, mem_flags);
7959 +  if(sb_desc == NULL)
7960 +    return NULL;
7961 +  sb_desc->command |= IO_FIELD(USB_SB_command, rem, datalen % maxlen);
7962 +  return sb_desc;
7963 +}
7964 +
7965 +void set_sb_cmds(struct USB_SB_Desc *sb_desc, __u16 flags) {
7966 +  sb_desc->command |= flags;
7967 +}
7968 +
7969 +int create_sb_for_urb(struct urb *urb, int mem_flags) {
7970 +  int is_out = !usb_pipein(urb->pipe);
7971 +  int type = usb_pipetype(urb->pipe);
7972 +  int maxlen = usb_maxpacket(urb->dev, urb->pipe, is_out);
7973 +  int buf_len = urb->transfer_buffer_length;
7974 +  void *buf = buf_len > 0 ? urb->transfer_buffer : NULL;
7975 +  struct USB_SB_Desc *sb_desc = NULL;
7976 +
7977 +  struct crisv10_urb_priv *urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
7978 +  ASSERT(urb_priv != NULL);
7979 +
7980 +  switch(type) {
7981 +  case PIPE_CONTROL:
7982 +    /* Setup stage */
7983 +    sb_desc = create_sb(NULL, TT_SETUP, urb->setup_packet, 8, mem_flags);
7984 +    if(sb_desc == NULL)
7985 +      return -ENOMEM;
7986 +    set_sb_cmds(sb_desc, CMD_FULL);
7987 +
7988 +    /* Attach first SB to URB */
7989 +    urb_priv->first_sb = sb_desc;    
7990 +
7991 +    if (is_out) { /* Out Control URB */
7992 +      /* If this Control OUT transfer has an optional data stage we add
7993 +        an OUT token before the mandatory IN (status) token */
7994 +      if ((buf_len > 0) && buf) {
7995 +       sb_desc = create_sb(sb_desc, TT_OUT, buf, buf_len, mem_flags);
7996 +       if(sb_desc == NULL)
7997 +         return -ENOMEM;
7998 +       set_sb_cmds(sb_desc, CMD_FULL);
7999 +      }
8000 +
8001 +      /* Status stage */
8002 +      /* The data length has to be exactly 1. This is due to a requirement
8003 +         of the USB specification that a host must be prepared to receive
8004 +         data in the status phase */
8005 +      sb_desc = create_sb(sb_desc, TT_IN, NULL, 1, mem_flags);
8006 +      if(sb_desc == NULL)
8007 +       return -ENOMEM;
8008 +    } else { /* In control URB */
8009 +      /* Data stage */
8010 +      sb_desc = create_sb_in(sb_desc, buf_len, maxlen, mem_flags);
8011 +      if(sb_desc == NULL)
8012 +       return -ENOMEM;
8013 +
8014 +      /* Status stage */
8015 +      /* Read comment at zout_buffer declaration for an explanation to this. */
8016 +      sb_desc = create_sb(sb_desc, TT_ZOUT, &zout_buffer[0], 1, mem_flags);
8017 +      if(sb_desc == NULL)
8018 +       return -ENOMEM;
8019 +      /* Set descriptor interrupt flag for in URBs so we can finish URB after
8020 +         zout-packet has been sent */
8021 +      set_sb_cmds(sb_desc, CMD_INTR | CMD_FULL);
8022 +    }
8023 +    /* Set end-of-list flag in last SB */
8024 +    set_sb_cmds(sb_desc, CMD_EOL);
8025 +    /* Attach last SB to URB */
8026 +    urb_priv->last_sb = sb_desc;
8027 +    break;
8028 +
8029 +  case PIPE_BULK:
8030 +    if (is_out) { /* Out Bulk URB */
8031 +      sb_desc = create_sb(NULL, TT_OUT, buf, buf_len, mem_flags);
8032 +      if(sb_desc == NULL)
8033 +       return -ENOMEM;
8034 +      /* The full field is set to yes, even if we don't actually check that
8035 +        this is a full-length transfer (i.e., that transfer_buffer_length %
8036 +        maxlen = 0).
8037 +        Setting full prevents the USB controller from sending an empty packet
8038 +        in that case.  However, if URB_ZERO_PACKET was set we want that. */
8039 +      if (!(urb->transfer_flags & URB_ZERO_PACKET)) {
8040 +       set_sb_cmds(sb_desc, CMD_FULL);
8041 +      }
8042 +    } else { /* In Bulk URB */
8043 +      sb_desc = create_sb_in(NULL, buf_len, maxlen, mem_flags);
8044 +      if(sb_desc == NULL)
8045 +       return -ENOMEM;
8046 +    }
8047 +    /* Set end-of-list flag for last SB */
8048 +    set_sb_cmds(sb_desc, CMD_EOL);
8049 +
8050 +    /* Attach SB to URB */
8051 +    urb_priv->first_sb = sb_desc;
8052 +    urb_priv->last_sb = sb_desc;
8053 +    break;
8054 +
8055 +  case PIPE_INTERRUPT:
8056 +    if(is_out) { /* Out Intr URB */
8057 +      sb_desc = create_sb(NULL, TT_OUT, buf, buf_len, mem_flags);
8058 +      if(sb_desc == NULL)
8059 +       return -ENOMEM;
8060 +
8061 +      /* The full field is set to yes, even if we don't actually check that
8062 +        this is a full-length transfer (i.e., that transfer_buffer_length %
8063 +        maxlen = 0).
8064 +        Setting full prevents the USB controller from sending an empty packet
8065 +        in that case.  However, if URB_ZERO_PACKET was set we want that. */
8066 +      if (!(urb->transfer_flags & URB_ZERO_PACKET)) {
8067 +       set_sb_cmds(sb_desc, CMD_FULL);
8068 +      }
8069 +      /* Only generate TX interrupt if it's a Out URB*/
8070 +      set_sb_cmds(sb_desc, CMD_INTR);
8071 +
8072 +    } else { /* In Intr URB */
8073 +      sb_desc = create_sb_in(NULL, buf_len, maxlen, mem_flags);
8074 +      if(sb_desc == NULL)
8075 +       return -ENOMEM;
8076 +    }
8077 +    /* Set end-of-list flag for last SB */
8078 +    set_sb_cmds(sb_desc, CMD_EOL);
8079 +
8080 +    /* Attach SB to URB */
8081 +    urb_priv->first_sb = sb_desc;
8082 +    urb_priv->last_sb = sb_desc;
8083 +
8084 +    break;
8085 +  case PIPE_ISOCHRONOUS:
8086 +    if(is_out) { /* Out Isoc URB */
8087 +      int i;
8088 +      if(urb->number_of_packets == 0) {
8089 +       tc_err("Can't create SBs for Isoc URB with zero packets\n");
8090 +       return -EPIPE;
8091 +      }
8092 +      /* Create one SB descriptor for each packet and link them together. */
8093 +      for(i = 0; i < urb->number_of_packets; i++) {
8094 +       if (urb->iso_frame_desc[i].length > 0) {
8095 +
8096 +         sb_desc = create_sb(sb_desc, TT_OUT, urb->transfer_buffer +
8097 +                             urb->iso_frame_desc[i].offset,
8098 +                             urb->iso_frame_desc[i].length, mem_flags);
8099 +         if(sb_desc == NULL)
8100 +           return -ENOMEM;
8101 +
8102 +         /* Check if it's a full length packet */
8103 +         if (urb->iso_frame_desc[i].length ==
8104 +             usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe))) {
8105 +           set_sb_cmds(sb_desc, CMD_FULL);
8106 +         }
8107 +         
8108 +       } else { /* zero length packet */
8109 +         sb_desc = create_sb(sb_desc, TT_ZOUT, &zout_buffer[0], 1, mem_flags);
8110 +         if(sb_desc == NULL)
8111 +           return -ENOMEM;
8112 +         set_sb_cmds(sb_desc, CMD_FULL);
8113 +       }
8114 +       /* Attach first SB descriptor to URB */
8115 +       if (i == 0) {
8116 +         urb_priv->first_sb = sb_desc;
8117 +       }
8118 +      }
8119 +      /* Set interrupt and end-of-list flags in last SB */
8120 +      set_sb_cmds(sb_desc, CMD_INTR | CMD_EOL);
8121 +      /* Attach last SB descriptor to URB */
8122 +      urb_priv->last_sb = sb_desc;
8123 +      tc_dbg("Created %d out SBs for Isoc URB:0x%x\n",
8124 +              urb->number_of_packets, (unsigned int)urb);
8125 +    } else { /* In Isoc URB */
8126 +      /* Actual number of packets is not relevant for periodic in traffic as
8127 +        long as it is more than zero.  Set to 1 always. */
8128 +      sb_desc = create_sb(sb_desc, TT_IN, NULL, 1, mem_flags);
8129 +      if(sb_desc == NULL)
8130 +       return -ENOMEM;
8131 +      /* Set end-of-list flags for SB */
8132 +      set_sb_cmds(sb_desc, CMD_EOL);
8133 +
8134 +      /* Attach SB to URB */
8135 +      urb_priv->first_sb = sb_desc;
8136 +      urb_priv->last_sb = sb_desc;
8137 +    }
8138 +    break;
8139 +  default:
8140 +    tc_err("Unknown pipe-type\n");
8141 +    return -EPIPE;
8142 +    break;
8143 +  }
8144 +  return 0;
8145 +}
8146 +
8147 +int init_intr_urb(struct urb *urb, int mem_flags) {
8148 +  struct crisv10_urb_priv *urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
8149 +  struct USB_EP_Desc* ep_desc;
8150 +  int interval;
8151 +  int i;
8152 +  int ep_count;
8153 +
8154 +  ASSERT(urb_priv != NULL);
8155 +  ASSERT(usb_pipeint(urb->pipe));
8156 +  /* We can't support interval longer than amount of eof descriptors in
8157 +     TxIntrEPList */
8158 +  if(urb->interval > MAX_INTR_INTERVAL) {
8159 +    tc_err("Interrupt interval %dms too big (max: %dms)\n", urb->interval,
8160 +          MAX_INTR_INTERVAL);
8161 +    return -EINVAL;
8162 +  }
8163 +
8164 +  /* We assume that the SB descriptors already have been setup */
8165 +  ASSERT(urb_priv->first_sb != NULL);
8166 +
8167 +  /* Round of the interval to 2^n, it is obvious that this code favours
8168 +     smaller numbers, but that is actually a good thing */
8169 +  /* FIXME: The "rounding error" for larger intervals will be quite
8170 +     large. For in traffic this shouldn't be a problem since it will only
8171 +     mean that we "poll" more often. */
8172 +  interval = urb->interval;
8173 +  for (i = 0; interval; i++) {
8174 +    interval = interval >> 1;
8175 +  }
8176 +  urb_priv->interval = 1 << (i - 1);
8177 +
8178 +  /* We can only have max interval for Out Interrupt due to that we can only
8179 +     handle one linked in EP for a certain epid in the Intr descr array at the
8180 +     time. The USB Controller in the Etrax 100LX continues to process Intr EPs
8181 +     so we have no way of knowing which one that caused the actual transfer if
8182 +     we have several linked in. */
8183 +  if(usb_pipeout(urb->pipe)) {
8184 +    urb_priv->interval = MAX_INTR_INTERVAL;
8185 +  }
8186 +
8187 +  /* Calculate amount of EPs needed */
8188 +  ep_count = MAX_INTR_INTERVAL / urb_priv->interval;
8189 +
8190 +  for(i = 0; i < ep_count; i++) {
8191 +    ep_desc = create_ep(urb_priv->epid, urb_priv->first_sb, mem_flags);
8192 +    if(ep_desc == NULL) {
8193 +      /* Free any descriptors that we may have allocated before failure */
8194 +      while(i > 0) {
8195 +       i--;
8196 +       kfree(urb_priv->intr_ep_pool[i]);
8197 +      }
8198 +      return -ENOMEM;
8199 +    }
8200 +    urb_priv->intr_ep_pool[i] = ep_desc;
8201 +  }
8202 +  urb_priv->intr_ep_pool_length = ep_count;
8203 +  return 0;
8204 +}
8205 +
8206 +/* DMA RX/TX functions */
8207 +/* ----------------------- */
8208 +
8209 +static void tc_dma_init_rx_list(void) {
8210 +  int i;
8211 +
8212 +  /* Setup descriptor list except last one */
8213 +  for (i = 0; i < (NBR_OF_RX_DESC - 1); i++) {
8214 +    RxDescList[i].sw_len = RX_DESC_BUF_SIZE;
8215 +    RxDescList[i].command = 0;
8216 +    RxDescList[i].next = virt_to_phys(&RxDescList[i + 1]);
8217 +    RxDescList[i].buf = virt_to_phys(RxBuf + (i * RX_DESC_BUF_SIZE));
8218 +    RxDescList[i].hw_len = 0;
8219 +    RxDescList[i].status = 0;
8220 +    
8221 +    /* DMA IN cache bug. (struct etrax_dma_descr has the same layout as
8222 +       USB_IN_Desc for the relevant fields.) */
8223 +    prepare_rx_descriptor((struct etrax_dma_descr*)&RxDescList[i]);
8224 +    
8225 +  }
8226 +  /* Special handling of last descriptor */
8227 +  RxDescList[i].sw_len = RX_DESC_BUF_SIZE;
8228 +  RxDescList[i].command = IO_STATE(USB_IN_command, eol, yes);
8229 +  RxDescList[i].next = virt_to_phys(&RxDescList[0]);
8230 +  RxDescList[i].buf = virt_to_phys(RxBuf + (i * RX_DESC_BUF_SIZE));
8231 +  RxDescList[i].hw_len = 0;
8232 +  RxDescList[i].status = 0;
8233 +  
8234 +  /* Setup list pointers that show progress in list */
8235 +  myNextRxDesc = &RxDescList[0];
8236 +  myLastRxDesc = &RxDescList[NBR_OF_RX_DESC - 1];
8237 +  
8238 +  flush_etrax_cache();
8239 +  /* Point DMA to first descriptor in list and start it */
8240 +  *R_DMA_CH9_FIRST = virt_to_phys(myNextRxDesc);
8241 +  *R_DMA_CH9_CMD = IO_STATE(R_DMA_CH9_CMD, cmd, start);
8242 +}
8243 +
8244 +
8245 +static void tc_dma_init_tx_bulk_list(void) {
8246 +  int i;
8247 +  volatile struct USB_EP_Desc *epDescr;
8248 +
8249 +  for (i = 0; i < (NBR_OF_EPIDS - 1); i++) {
8250 +    epDescr = &(TxBulkEPList[i]);
8251 +    CHECK_ALIGN(epDescr);
8252 +    epDescr->hw_len = 0;
8253 +    epDescr->command = IO_FIELD(USB_EP_command, epid, i);
8254 +    epDescr->sub = 0;
8255 +    epDescr->next = virt_to_phys(&TxBulkEPList[i + 1]);
8256 +
8257 +    /* Initiate two EPs, disabled and with the eol flag set. No need for any
8258 +       preserved epid. */
8259 +    
8260 +    /* The first one has the intr flag set so we get an interrupt when the DMA
8261 +       channel is about to become disabled. */
8262 +    CHECK_ALIGN(&TxBulkDummyEPList[i][0]);
8263 +    TxBulkDummyEPList[i][0].hw_len = 0;
8264 +    TxBulkDummyEPList[i][0].command = (IO_FIELD(USB_EP_command, epid, DUMMY_EPID) |
8265 +                                      IO_STATE(USB_EP_command, eol, yes) |
8266 +                                      IO_STATE(USB_EP_command, intr, yes));
8267 +    TxBulkDummyEPList[i][0].sub = 0;
8268 +    TxBulkDummyEPList[i][0].next = virt_to_phys(&TxBulkDummyEPList[i][1]);
8269 +    
8270 +    /* The second one. */
8271 +    CHECK_ALIGN(&TxBulkDummyEPList[i][1]);
8272 +    TxBulkDummyEPList[i][1].hw_len = 0;
8273 +    TxBulkDummyEPList[i][1].command = (IO_FIELD(USB_EP_command, epid, DUMMY_EPID) |
8274 +                                      IO_STATE(USB_EP_command, eol, yes));
8275 +    TxBulkDummyEPList[i][1].sub = 0;
8276 +    /* The last dummy's next pointer is the same as the current EP's next pointer. */
8277 +    TxBulkDummyEPList[i][1].next = virt_to_phys(&TxBulkEPList[i + 1]);
8278 +  }
8279 +
8280 +  /* Special handling of last descr in list, make list circular */
8281 +  epDescr = &TxBulkEPList[i];
8282 +  CHECK_ALIGN(epDescr);
8283 +  epDescr->hw_len = 0;
8284 +  epDescr->command = IO_STATE(USB_EP_command, eol, yes) |
8285 +    IO_FIELD(USB_EP_command, epid, i);
8286 +  epDescr->sub = 0;
8287 +  epDescr->next = virt_to_phys(&TxBulkEPList[0]);
8288 +  
8289 +  /* Init DMA sub-channel pointers to last item in each list */
8290 +  *R_DMA_CH8_SUB0_EP = virt_to_phys(&TxBulkEPList[i]);
8291 +  /* No point in starting the bulk channel yet.
8292 +   *R_DMA_CH8_SUB0_CMD = IO_STATE(R_DMA_CH8_SUB0_CMD, cmd, start); */
8293 +}
8294 +
8295 +static void tc_dma_init_tx_ctrl_list(void) {
8296 +  int i;
8297 +  volatile struct USB_EP_Desc *epDescr;
8298 +
8299 +  for (i = 0; i < (NBR_OF_EPIDS - 1); i++) {
8300 +    epDescr = &(TxCtrlEPList[i]);
8301 +    CHECK_ALIGN(epDescr);
8302 +    epDescr->hw_len = 0;
8303 +    epDescr->command = IO_FIELD(USB_EP_command, epid, i);
8304 +    epDescr->sub = 0;
8305 +    epDescr->next = virt_to_phys(&TxCtrlEPList[i + 1]);
8306 +  }
8307 +  /* Special handling of last descr in list, make list circular */
8308 +  epDescr = &TxCtrlEPList[i];
8309 +  CHECK_ALIGN(epDescr);
8310 +  epDescr->hw_len = 0;
8311 +  epDescr->command = IO_STATE(USB_EP_command, eol, yes) |
8312 +    IO_FIELD(USB_EP_command, epid, i);
8313 +  epDescr->sub = 0;
8314 +  epDescr->next = virt_to_phys(&TxCtrlEPList[0]);
8315 +  
8316 +  /* Init DMA sub-channel pointers to last item in each list */
8317 +  *R_DMA_CH8_SUB1_EP = virt_to_phys(&TxCtrlEPList[i]);
8318 +  /* No point in starting the ctrl channel yet.
8319 +   *R_DMA_CH8_SUB1_CMD = IO_STATE(R_DMA_CH8_SUB0_CMD, cmd, start); */
8320 +}
8321 +
8322 +
8323 +static void tc_dma_init_tx_intr_list(void) {
8324 +  int i;
8325 +
8326 +  TxIntrSB_zout.sw_len = 1;
8327 +  TxIntrSB_zout.next = 0;
8328 +  TxIntrSB_zout.buf = virt_to_phys(&zout_buffer[0]);
8329 +  TxIntrSB_zout.command = (IO_FIELD(USB_SB_command, rem, 0) |
8330 +                          IO_STATE(USB_SB_command, tt, zout) |
8331 +                          IO_STATE(USB_SB_command, full, yes) |
8332 +                          IO_STATE(USB_SB_command, eot, yes) |
8333 +                          IO_STATE(USB_SB_command, eol, yes));
8334 +  
8335 +  for (i = 0; i < (MAX_INTR_INTERVAL - 1); i++) {
8336 +    CHECK_ALIGN(&TxIntrEPList[i]);
8337 +    TxIntrEPList[i].hw_len = 0;
8338 +    TxIntrEPList[i].command =
8339 +      (IO_STATE(USB_EP_command, eof, yes) |
8340 +       IO_STATE(USB_EP_command, enable, yes) |
8341 +       IO_FIELD(USB_EP_command, epid, INVALID_EPID));
8342 +    TxIntrEPList[i].sub = virt_to_phys(&TxIntrSB_zout);
8343 +    TxIntrEPList[i].next = virt_to_phys(&TxIntrEPList[i + 1]);
8344 +  }
8345 +
8346 +  /* Special handling of last descr in list, make list circular */
8347 +  CHECK_ALIGN(&TxIntrEPList[i]);
8348 +  TxIntrEPList[i].hw_len = 0;
8349 +  TxIntrEPList[i].command =
8350 +    (IO_STATE(USB_EP_command, eof, yes) |
8351 +     IO_STATE(USB_EP_command, eol, yes) |
8352 +     IO_STATE(USB_EP_command, enable, yes) |
8353 +     IO_FIELD(USB_EP_command, epid, INVALID_EPID));
8354 +  TxIntrEPList[i].sub = virt_to_phys(&TxIntrSB_zout);
8355 +  TxIntrEPList[i].next = virt_to_phys(&TxIntrEPList[0]);
8356 +
8357 +  intr_dbg("Initiated Intr EP descriptor list\n");
8358 +
8359 +
8360 +  /* Connect DMA 8 sub-channel 2 to first in list */
8361 +  *R_DMA_CH8_SUB2_EP = virt_to_phys(&TxIntrEPList[0]);
8362 +}
8363 +
8364 +static void tc_dma_init_tx_isoc_list(void) {
8365 +  int i;
8366 +
8367 +  DBFENTER;
8368 +
8369 +  /* Read comment at zout_buffer declaration for an explanation to this. */
8370 +  TxIsocSB_zout.sw_len = 1;
8371 +  TxIsocSB_zout.next = 0;
8372 +  TxIsocSB_zout.buf = virt_to_phys(&zout_buffer[0]);
8373 +  TxIsocSB_zout.command = (IO_FIELD(USB_SB_command, rem, 0) |
8374 +                          IO_STATE(USB_SB_command, tt, zout) |
8375 +                          IO_STATE(USB_SB_command, full, yes) |
8376 +                          IO_STATE(USB_SB_command, eot, yes) |
8377 +                          IO_STATE(USB_SB_command, eol, yes));
8378 +
8379 +  /* The last isochronous EP descriptor is a dummy. */
8380 +  for (i = 0; i < (NBR_OF_EPIDS - 1); i++) {
8381 +    CHECK_ALIGN(&TxIsocEPList[i]);
8382 +    TxIsocEPList[i].hw_len = 0;
8383 +    TxIsocEPList[i].command = IO_FIELD(USB_EP_command, epid, i);
8384 +    TxIsocEPList[i].sub = 0;
8385 +    TxIsocEPList[i].next = virt_to_phys(&TxIsocEPList[i + 1]);
8386 +  }
8387 +
8388 +  CHECK_ALIGN(&TxIsocEPList[i]);
8389 +  TxIsocEPList[i].hw_len = 0;
8390 +
8391 +  /* Must enable the last EP descr to get eof interrupt. */
8392 +  TxIsocEPList[i].command = (IO_STATE(USB_EP_command, enable, yes) |
8393 +                            IO_STATE(USB_EP_command, eof, yes) |
8394 +                            IO_STATE(USB_EP_command, eol, yes) |
8395 +                            IO_FIELD(USB_EP_command, epid, INVALID_EPID));
8396 +  TxIsocEPList[i].sub = virt_to_phys(&TxIsocSB_zout);
8397 +  TxIsocEPList[i].next = virt_to_phys(&TxIsocEPList[0]);
8398 +
8399 +  *R_DMA_CH8_SUB3_EP = virt_to_phys(&TxIsocEPList[0]);
8400 +  *R_DMA_CH8_SUB3_CMD = IO_STATE(R_DMA_CH8_SUB3_CMD, cmd, start);
8401 +}
8402 +
8403 +static int tc_dma_init(struct usb_hcd *hcd) {
8404 +  tc_dma_init_rx_list();
8405 +  tc_dma_init_tx_bulk_list();
8406 +  tc_dma_init_tx_ctrl_list();
8407 +  tc_dma_init_tx_intr_list();
8408 +  tc_dma_init_tx_isoc_list();
8409 +
8410 +  if (cris_request_dma(USB_TX_DMA_NBR,
8411 +                      "ETRAX 100LX built-in USB (Tx)",
8412 +                      DMA_VERBOSE_ON_ERROR,
8413 +                      dma_usb)) {
8414 +    err("Could not allocate DMA ch 8 for USB");
8415 +    return -EBUSY;
8416 +  }
8417 +       
8418 +  if (cris_request_dma(USB_RX_DMA_NBR,
8419 +                      "ETRAX 100LX built-in USB (Rx)",
8420 +                      DMA_VERBOSE_ON_ERROR,
8421 +                      dma_usb)) {
8422 +    err("Could not allocate DMA ch 9 for USB");
8423 +    return -EBUSY;
8424 +  }
8425 +
8426 +  *R_IRQ_MASK2_SET =
8427 +    /* Note that these interrupts are not used. */
8428 +    IO_STATE(R_IRQ_MASK2_SET, dma8_sub0_descr, set) |
8429 +    /* Sub channel 1 (ctrl) descr. interrupts are used. */
8430 +    IO_STATE(R_IRQ_MASK2_SET, dma8_sub1_descr, set) |
8431 +    IO_STATE(R_IRQ_MASK2_SET, dma8_sub2_descr, set) |
8432 +    /* Sub channel 3 (isoc) descr. interrupts are used. */
8433 +    IO_STATE(R_IRQ_MASK2_SET, dma8_sub3_descr, set);
8434 +  
8435 +  /* Note that the dma9_descr interrupt is not used. */
8436 +  *R_IRQ_MASK2_SET =
8437 +    IO_STATE(R_IRQ_MASK2_SET, dma9_eop, set) |
8438 +    IO_STATE(R_IRQ_MASK2_SET, dma9_descr, set);
8439 +
8440 +  if (request_irq(ETRAX_USB_RX_IRQ, tc_dma_rx_interrupt, 0,
8441 +                 "ETRAX 100LX built-in USB (Rx)", hcd)) {
8442 +    err("Could not allocate IRQ %d for USB", ETRAX_USB_RX_IRQ);
8443 +    return -EBUSY;
8444 +  }
8445 +  
8446 +  if (request_irq(ETRAX_USB_TX_IRQ, tc_dma_tx_interrupt, 0,
8447 +                 "ETRAX 100LX built-in USB (Tx)", hcd)) {
8448 +    err("Could not allocate IRQ %d for USB", ETRAX_USB_TX_IRQ);
8449 +    return -EBUSY;
8450 +  }
8451 +
8452 +  return 0;
8453 +}
8454 +
8455 +static void tc_dma_destroy(void) {
8456 +  free_irq(ETRAX_USB_RX_IRQ, NULL);
8457 +  free_irq(ETRAX_USB_TX_IRQ, NULL);
8458 +
8459 +  cris_free_dma(USB_TX_DMA_NBR, "ETRAX 100LX built-in USB (Tx)");
8460 +  cris_free_dma(USB_RX_DMA_NBR, "ETRAX 100LX built-in USB (Rx)");
8461 +
8462 +}
8463 +
8464 +static void tc_dma_link_intr_urb(struct urb *urb);
8465 +
8466 +/* Handle processing of Bulk, Ctrl and Intr queues */
8467 +static void tc_dma_process_queue(int epid) {
8468 +  struct urb *urb;
8469 +  struct crisv10_urb_priv *urb_priv = urb->hcpriv;
8470 +  unsigned long flags;
8471 +  char toggle;
8472 +
8473 +  if(epid_state[epid].disabled) {
8474 +    /* Don't process any URBs on a disabled endpoint */
8475 +    return;
8476 +  }
8477 +
8478 +  /* Do not disturb us while fiddling with EPs and epids */
8479 +  local_irq_save(flags);
8480 +
8481 +  /* For bulk, Ctrl and Intr can we only have one URB active at a time for
8482 +     a specific EP. */
8483 +  if(activeUrbList[epid] != NULL) {
8484 +    /* An URB is already active on EP, skip checking queue */
8485 +    local_irq_restore(flags);
8486 +    return;
8487 +  }
8488 +
8489 +  urb = urb_list_first(epid);
8490 +  if(urb == NULL) {
8491 +    /* No URB waiting in EP queue. Nothing do to */
8492 +    local_irq_restore(flags);
8493 +    return;
8494 +  }
8495 +
8496 +  urb_priv = urb->hcpriv;
8497 +  ASSERT(urb_priv != NULL);
8498 +  ASSERT(urb_priv->urb_state == NOT_STARTED);
8499 +  ASSERT(!usb_pipeisoc(urb->pipe));
8500 +
8501 +  /* Remove this URB from the queue and move it to active */
8502 +  activeUrbList[epid] = urb;
8503 +  urb_list_del(urb, epid);
8504 +
8505 +  urb_priv->urb_state = STARTED;
8506 +
8507 +  /* Reset error counters (regardless of which direction this traffic is). */
8508 +  etrax_epid_clear_error(epid);
8509 +
8510 +  /* Special handling of Intr EP lists */
8511 +  if(usb_pipeint(urb->pipe)) {
8512 +    tc_dma_link_intr_urb(urb);
8513 +    local_irq_restore(flags);
8514 +    return;
8515 +  }
8516 +
8517 +  /* Software must preset the toggle bits for Bulk and Ctrl */
8518 +  if(usb_pipecontrol(urb->pipe)) {
8519 +    /* Toggle bits are initialized only during setup transaction in a
8520 +       CTRL transfer */
8521 +    etrax_epid_set_toggle(epid, 0, 0);
8522 +    etrax_epid_set_toggle(epid, 1, 0);
8523 +  } else {
8524 +    toggle = usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
8525 +                          usb_pipeout(urb->pipe));
8526 +    etrax_epid_set_toggle(epid, usb_pipeout(urb->pipe), toggle);
8527 +  }
8528 +
8529 +  tc_dbg("Added SBs from (URB:0x%x %s %s) to epid %d: %s\n",
8530 +        (unsigned int)urb, str_dir(urb->pipe), str_type(urb->pipe), epid,
8531 +        sblist_to_str(urb_priv->first_sb));
8532 +
8533 +  /* We start the DMA sub channel without checking if it's running or not,
8534 +     because:
8535 +     1) If it's already running, issuing the start command is a nop.
8536 +     2) We avoid a test-and-set race condition. */
8537 +  switch(usb_pipetype(urb->pipe)) {
8538 +  case PIPE_BULK:
8539 +    /* Assert that the EP descriptor is disabled. */
8540 +    ASSERT(!(TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)));
8541 +
8542 +    /* Set up and enable the EP descriptor. */
8543 +    TxBulkEPList[epid].sub = virt_to_phys(urb_priv->first_sb);
8544 +    TxBulkEPList[epid].hw_len = 0;
8545 +    TxBulkEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes);
8546 +
8547 +    /* Check if the dummy list is already with us (if several urbs were queued). */
8548 +    if (usb_pipein(urb->pipe) && (TxBulkEPList[epid].next != virt_to_phys(&TxBulkDummyEPList[epid][0]))) {
8549 +      tc_dbg("Inviting dummy list to the party for urb 0x%lx, epid %d", 
8550 +            (unsigned long)urb, epid);
8551 +      
8552 +      /* We don't need to check if the DMA is at this EP or not before changing the
8553 +        next pointer, since we will do it in one 32-bit write (EP descriptors are
8554 +        32-bit aligned). */
8555 +      TxBulkEPList[epid].next = virt_to_phys(&TxBulkDummyEPList[epid][0]);
8556 +    }
8557 +
8558 +    restart_dma8_sub0();
8559 +
8560 +    /* Update/restart the bulk start timer since we just started the channel.*/
8561 +    mod_timer(&bulk_start_timer, jiffies + BULK_START_TIMER_INTERVAL);
8562 +    /* Update/restart the bulk eot timer since we just inserted traffic. */
8563 +    mod_timer(&bulk_eot_timer, jiffies + BULK_EOT_TIMER_INTERVAL);
8564 +    break;
8565 +  case PIPE_CONTROL:
8566 +    /* Assert that the EP descriptor is disabled. */
8567 +    ASSERT(!(TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)));
8568 +
8569 +    /* Set up and enable the EP descriptor. */
8570 +    TxCtrlEPList[epid].sub = virt_to_phys(urb_priv->first_sb);
8571 +    TxCtrlEPList[epid].hw_len = 0;
8572 +    TxCtrlEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes);
8573 +
8574 +    *R_DMA_CH8_SUB1_CMD = IO_STATE(R_DMA_CH8_SUB1_CMD, cmd, start);
8575 +    break;
8576 +  }
8577 +  local_irq_restore(flags);
8578 +}
8579 +
8580 +static void tc_dma_link_intr_urb(struct urb *urb) {
8581 +  struct crisv10_urb_priv *urb_priv = urb->hcpriv;
8582 +  volatile struct USB_EP_Desc *tmp_ep;
8583 +  struct USB_EP_Desc *ep_desc;
8584 +  int i = 0, epid;
8585 +  int pool_idx = 0;
8586 +
8587 +  ASSERT(urb_priv != NULL);
8588 +  epid = urb_priv->epid;
8589 +  ASSERT(urb_priv->interval > 0);
8590 +  ASSERT(urb_priv->intr_ep_pool_length > 0);
8591 +
8592 +  tmp_ep = &TxIntrEPList[0];
8593 +
8594 +  /* Only insert one EP descriptor in list for Out Intr URBs.
8595 +     We can only handle Out Intr with interval of 128ms because
8596 +     it's not possible to insert several Out Intr EPs because they
8597 +     are not consumed by the DMA. */
8598 +  if(usb_pipeout(urb->pipe)) {
8599 +    ep_desc = urb_priv->intr_ep_pool[0];
8600 +    ASSERT(ep_desc);
8601 +    ep_desc->next = tmp_ep->next;
8602 +    tmp_ep->next = virt_to_phys(ep_desc);
8603 +    i++;
8604 +  } else {
8605 +    /* Loop through Intr EP descriptor list and insert EP for URB at
8606 +       specified interval */
8607 +    do {
8608 +      /* Each EP descriptor with eof flag sat signals a new frame */
8609 +      if (tmp_ep->command & IO_MASK(USB_EP_command, eof)) {
8610 +       /* Insert a EP from URBs EP pool at correct interval */
8611 +       if ((i % urb_priv->interval) == 0) {
8612 +         ep_desc = urb_priv->intr_ep_pool[pool_idx];
8613 +         ASSERT(ep_desc);
8614 +         ep_desc->next = tmp_ep->next;
8615 +         tmp_ep->next = virt_to_phys(ep_desc);
8616 +         pool_idx++;
8617 +         ASSERT(pool_idx <= urb_priv->intr_ep_pool_length);
8618         }
8619 +       i++;
8620 +      }
8621 +      tmp_ep = (struct USB_EP_Desc *)phys_to_virt(tmp_ep->next);
8622 +    } while(tmp_ep != &TxIntrEPList[0]);
8623 +  }
8624 +
8625 +  intr_dbg("Added SBs to intr epid %d: %s interval:%d (%d EP)\n", epid,
8626 +          sblist_to_str(urb_priv->first_sb), urb_priv->interval, pool_idx);
8627 +
8628 +  /* We start the DMA sub channel without checking if it's running or not,
8629 +     because:
8630 +     1) If it's already running, issuing the start command is a nop.
8631 +     2) We avoid a test-and-set race condition. */
8632 +  *R_DMA_CH8_SUB2_CMD = IO_STATE(R_DMA_CH8_SUB2_CMD, cmd, start);
8633 +}
8634 +
8635 +static void tc_dma_process_isoc_urb(struct urb *urb) {
8636 +  unsigned long flags;
8637 +  struct crisv10_urb_priv *urb_priv = urb->hcpriv;
8638 +  int epid;
8639 +
8640 +  /* Do not disturb us while fiddling with EPs and epids */
8641 +  local_irq_save(flags);
8642 +
8643 +  ASSERT(urb_priv);
8644 +  ASSERT(urb_priv->first_sb);
8645 +  epid = urb_priv->epid;
8646 +
8647 +  if(activeUrbList[epid] == NULL) {
8648 +    /* EP is idle, so make this URB active */
8649 +    activeUrbList[epid] = urb;
8650 +    urb_list_del(urb, epid);
8651 +    ASSERT(TxIsocEPList[epid].sub == 0);
8652 +    ASSERT(!(TxIsocEPList[epid].command &
8653 +            IO_STATE(USB_EP_command, enable, yes)));
8654 +
8655 +    /* Differentiate between In and Out Isoc. Because In SBs are not consumed*/
8656 +    if(usb_pipein(urb->pipe)) {
8657 +    /* Each EP for In Isoc will have only one SB descriptor, setup when
8658 +       submitting the first active urb. We do it here by copying from URBs
8659 +       pre-allocated SB. */
8660 +      memcpy((void *)&(TxIsocSBList[epid]), urb_priv->first_sb,
8661 +            sizeof(TxIsocSBList[epid]));
8662 +      TxIsocEPList[epid].hw_len = 0;
8663 +      TxIsocEPList[epid].sub = virt_to_phys(&(TxIsocSBList[epid]));
8664 +    } else {
8665 +      /* For Out Isoc we attach the pre-allocated list of SBs for the URB */
8666 +      TxIsocEPList[epid].hw_len = 0;
8667 +      TxIsocEPList[epid].sub = virt_to_phys(urb_priv->first_sb);
8668 +
8669 +      isoc_dbg("Attached first URB:0x%x[%d] to epid:%d first_sb:0x%x"
8670 +              " last_sb::0x%x\n",
8671 +              (unsigned int)urb, urb_priv->urb_num, epid,
8672 +              (unsigned int)(urb_priv->first_sb),
8673 +              (unsigned int)(urb_priv->last_sb));
8674 +    }
8675 +
8676 +    if (urb->transfer_flags & URB_ISO_ASAP) {
8677 +      /* The isoc transfer should be started as soon as possible. The
8678 +        start_frame field is a return value if URB_ISO_ASAP was set. Comparing
8679 +        R_USB_FM_NUMBER with a USB Chief trace shows that the first isoc IN
8680 +        token is sent 2 frames later. I'm not sure how this affects usage of
8681 +        the start_frame field by the device driver, or how it affects things
8682 +        when USB_ISO_ASAP is not set, so therefore there's no compensation for
8683 +        the 2 frame "lag" here. */
8684 +      urb->start_frame = (*R_USB_FM_NUMBER & 0x7ff);
8685 +      TxIsocEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes);
8686 +      urb_priv->urb_state = STARTED;
8687 +      isoc_dbg("URB_ISO_ASAP set, urb->start_frame set to %d\n",
8688 +              urb->start_frame);
8689 +    } else {
8690 +      /* Not started yet. */
8691 +      urb_priv->urb_state = NOT_STARTED;
8692 +      isoc_warn("urb_priv->urb_state set to NOT_STARTED for URB:0x%x\n",
8693 +               (unsigned int)urb);
8694 +    }
8695 +
8696 +  } else {
8697 +    /* An URB is already active on the EP. Leave URB in queue and let
8698 +       finish_isoc_urb process it after current active URB */
8699 +    ASSERT(TxIsocEPList[epid].sub != 0);
8700 +
8701 +    if(usb_pipein(urb->pipe)) {
8702 +      /* Because there already is an active In URB on this epid we do nothing
8703 +         and the finish_isoc_urb() function will handle switching to next URB*/
8704 +
8705 +    } else { /* For Out Isoc, insert new URBs traffic last in SB-list. */
8706 +      struct USB_SB_Desc *temp_sb_desc;
8707 +
8708 +      /* Set state STARTED to all Out Isoc URBs added to SB list because we
8709 +         don't know how many of them that are finished before descr interrupt*/
8710 +      urb_priv->urb_state = STARTED;
8711 +
8712 +      /* Find end of current SB list by looking for SB with eol flag set */
8713 +      temp_sb_desc = phys_to_virt(TxIsocEPList[epid].sub);
8714 +      while ((temp_sb_desc->command & IO_MASK(USB_SB_command, eol)) !=
8715 +            IO_STATE(USB_SB_command, eol, yes)) {
8716 +       ASSERT(temp_sb_desc->next);
8717 +       temp_sb_desc = phys_to_virt(temp_sb_desc->next);
8718 +      }
8719 +
8720 +      isoc_dbg("Appended URB:0x%x[%d] (first:0x%x last:0x%x) to epid:%d"
8721 +              " sub:0x%x eol:0x%x\n",
8722 +              (unsigned int)urb, urb_priv->urb_num,
8723 +              (unsigned int)(urb_priv->first_sb),
8724 +              (unsigned int)(urb_priv->last_sb), epid,
8725 +              (unsigned int)phys_to_virt(TxIsocEPList[epid].sub),
8726 +              (unsigned int)temp_sb_desc);
8727 +
8728 +      /* Next pointer must be set before eol is removed. */
8729 +      temp_sb_desc->next = virt_to_phys(urb_priv->first_sb);
8730 +      /* Clear the previous end of list flag since there is a new in the
8731 +        added SB descriptor list. */
8732 +      temp_sb_desc->command &= ~IO_MASK(USB_SB_command, eol);
8733 +
8734 +      if (!(TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable))) {
8735 +       __u32 epid_data;
8736 +       /* 8.8.5 in Designer's Reference says we should check for and correct
8737 +          any errors in the EP here.  That should not be necessary if
8738 +          epid_attn is handled correctly, so we assume all is ok. */
8739 +       epid_data = etrax_epid_iso_get(epid);
8740 +       if (IO_EXTRACT(R_USB_EPT_DATA, error_code, epid_data) !=
8741 +           IO_STATE_VALUE(R_USB_EPT_DATA, error_code, no_error)) {
8742 +         isoc_err("Disabled Isoc EP with error:%d on epid:%d when appending"
8743 +                  " URB:0x%x[%d]\n",
8744 +                  IO_EXTRACT(R_USB_EPT_DATA, error_code, epid_data), epid,
8745 +                  (unsigned int)urb, urb_priv->urb_num);
8746 +       }
8747 +
8748 +       /* The SB list was exhausted. */
8749 +       if (virt_to_phys(urb_priv->last_sb) != TxIsocEPList[epid].sub) {
8750 +         /* The new sublist did not get processed before the EP was
8751 +            disabled.  Setup the EP again. */
8752 +
8753 +         if(virt_to_phys(temp_sb_desc) == TxIsocEPList[epid].sub) {
8754 +           isoc_dbg("EP for epid:%d stoped at SB:0x%x before newly inserted"
8755 +                    ", restarting from this URBs SB:0x%x\n",
8756 +                    epid, (unsigned int)temp_sb_desc,
8757 +                    (unsigned int)(urb_priv->first_sb));
8758 +           TxIsocEPList[epid].hw_len = 0;
8759 +           TxIsocEPList[epid].sub = virt_to_phys(urb_priv->first_sb);
8760 +           urb->start_frame = (*R_USB_FM_NUMBER & 0x7ff);
8761 +           /* Enable the EP again so data gets processed this time */
8762 +           TxIsocEPList[epid].command |=
8763 +             IO_STATE(USB_EP_command, enable, yes);
8764 +
8765 +         } else {
8766 +           /* The EP has been disabled but not at end this URB (god knows
8767 +              where). This should generate an epid_attn so we should not be
8768 +              here */
8769 +           isoc_warn("EP was disabled on sb:0x%x before SB list for"
8770 +                    " URB:0x%x[%d] got processed\n",
8771 +                    (unsigned int)phys_to_virt(TxIsocEPList[epid].sub),
8772 +                    (unsigned int)urb, urb_priv->urb_num);
8773 +         }
8774 +       } else {
8775 +         /* This might happen if we are slow on this function and isn't
8776 +            an error. */
8777 +         isoc_dbg("EP was disabled and finished with SBs from appended"
8778 +                  " URB:0x%x[%d]\n", (unsigned int)urb, urb_priv->urb_num);
8779 +       }
8780 +      }
8781 +    }
8782 +  }
8783 +  
8784 +  /* Start the DMA sub channel */
8785 +  *R_DMA_CH8_SUB3_CMD = IO_STATE(R_DMA_CH8_SUB3_CMD, cmd, start);
8786 +
8787 +  local_irq_restore(flags);
8788 +}
8789 +
8790 +static void tc_dma_unlink_intr_urb(struct urb *urb) {
8791 +  struct crisv10_urb_priv *urb_priv = urb->hcpriv;
8792 +  volatile struct USB_EP_Desc *first_ep;  /* First EP in the list. */
8793 +  volatile struct USB_EP_Desc *curr_ep;   /* Current EP, the iterator. */
8794 +  volatile struct USB_EP_Desc *next_ep;   /* The EP after current. */
8795 +  volatile struct USB_EP_Desc *unlink_ep; /* The one we should remove from
8796 +                                            the list. */
8797 +  int count = 0;
8798 +  volatile int timeout = 10000;
8799 +  int epid;
8800 +
8801 +  /* Read 8.8.4 in Designer's Reference, "Removing an EP Descriptor from the
8802 +     List". */
8803 +  ASSERT(urb_priv);
8804 +  ASSERT(urb_priv->intr_ep_pool_length > 0);
8805 +  epid = urb_priv->epid;
8806 +
8807 +  /* First disable all Intr EPs belonging to epid for this URB */
8808 +  first_ep = &TxIntrEPList[0];
8809 +  curr_ep = first_ep;
8810 +  do {
8811 +    next_ep = (struct USB_EP_Desc *)phys_to_virt(curr_ep->next);
8812 +    if (IO_EXTRACT(USB_EP_command, epid, next_ep->command) == epid) {
8813 +      /* Disable EP */
8814 +      next_ep->command &= ~IO_MASK(USB_EP_command, enable);
8815 +    }
8816 +    curr_ep = phys_to_virt(curr_ep->next);
8817 +  } while (curr_ep != first_ep);
8818 +
8819 +
8820 +  /* Now unlink all EPs belonging to this epid from Descr list */
8821 +  first_ep = &TxIntrEPList[0];
8822 +  curr_ep = first_ep;
8823 +  do {
8824 +    next_ep = (struct USB_EP_Desc *)phys_to_virt(curr_ep->next);
8825 +    if (IO_EXTRACT(USB_EP_command, epid, next_ep->command) == epid) {
8826 +      /* This is the one we should unlink. */
8827 +      unlink_ep = next_ep;
8828 +
8829 +      /* Actually unlink the EP from the DMA list. */
8830 +      curr_ep->next = unlink_ep->next;
8831 +
8832 +      /* Wait until the DMA is no longer at this descriptor. */
8833 +      while((*R_DMA_CH8_SUB2_EP == virt_to_phys(unlink_ep)) &&
8834 +           (timeout-- > 0));
8835 +      if(timeout == 0) {
8836 +       warn("Timeout while waiting for DMA-TX-Intr to leave unlink EP\n");
8837 +      }
8838 +      
8839 +      count++;
8840 +    }
8841 +    curr_ep = phys_to_virt(curr_ep->next);
8842 +  } while (curr_ep != first_ep);
8843 +
8844 +  if(count != urb_priv->intr_ep_pool_length) {
8845 +    intr_warn("Unlinked %d of %d Intr EPs for URB:0x%x[%d]\n", count,
8846 +             urb_priv->intr_ep_pool_length, (unsigned int)urb,
8847 +             urb_priv->urb_num);
8848 +  } else {
8849 +    intr_dbg("Unlinked %d of %d interrupt EPs for URB:0x%x\n", count,
8850 +            urb_priv->intr_ep_pool_length, (unsigned int)urb);
8851 +  }
8852 +}
8853 +
8854 +static void check_finished_bulk_tx_epids(struct usb_hcd *hcd,
8855 +                                                   int timer) {
8856 +  unsigned long flags;
8857 +  int epid;
8858 +  struct urb *urb;
8859 +  struct crisv10_urb_priv * urb_priv;
8860 +  __u32 epid_data;
8861 +
8862 +  /* Protect TxEPList */
8863 +  local_irq_save(flags);
8864 +
8865 +  for (epid = 0; epid < NBR_OF_EPIDS; epid++) {
8866 +    /* A finished EP descriptor is disabled and has a valid sub pointer */
8867 +    if (!(TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) &&
8868 +       (TxBulkEPList[epid].sub != 0)) {
8869 +
8870 +      /* Get the active URB for this epid */
8871 +      urb = activeUrbList[epid];
8872 +      /* Sanity checks */
8873 +      ASSERT(urb);
8874 +      urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
8875 +      ASSERT(urb_priv);
8876 +      
8877 +      /* Only handle finished out Bulk EPs here,
8878 +        and let RX interrupt take care of the rest */
8879 +      if(!epid_out_traffic(epid)) {
8880 +       continue;
8881 +      }
8882 +
8883 +      if(timer) {
8884 +       tc_warn("Found finished %s Bulk epid:%d URB:0x%x[%d] from timeout\n",
8885 +               epid_out_traffic(epid) ? "Out" : "In", epid, (unsigned int)urb,
8886 +               urb_priv->urb_num);
8887 +      } else {
8888 +       tc_dbg("Found finished %s Bulk epid:%d URB:0x%x[%d] from interrupt\n",
8889 +              epid_out_traffic(epid) ? "Out" : "In", epid, (unsigned int)urb,
8890 +              urb_priv->urb_num);
8891 +      }
8892 +
8893 +      if(urb_priv->urb_state == UNLINK) {
8894 +       /* This Bulk URB is requested to be unlinked, that means that the EP
8895 +          has been disabled and we might not have sent all data */
8896 +       tc_finish_urb(hcd, urb, urb->status);
8897 +       continue;
8898 +      }
8899 +
8900 +      ASSERT(urb_priv->urb_state == STARTED);
8901 +      if (phys_to_virt(TxBulkEPList[epid].sub) != urb_priv->last_sb) {
8902 +       tc_err("Endpoint got disabled before reaching last sb\n");
8903 +      }
8904 +       
8905 +      epid_data = etrax_epid_get(epid);
8906 +      if (IO_EXTRACT(R_USB_EPT_DATA, error_code, epid_data) ==
8907 +         IO_STATE_VALUE(R_USB_EPT_DATA, error_code, no_error)) {
8908 +       /* This means that the endpoint has no error, is disabled
8909 +          and had inserted traffic, i.e. transfer successfully completed. */
8910 +       tc_finish_urb(hcd, urb, 0);
8911 +      } else {
8912 +       /* Shouldn't happen. We expect errors to be caught by epid
8913 +          attention. */
8914 +       tc_err("Found disabled bulk EP desc (epid:%d error:%d)\n",
8915 +              epid, IO_EXTRACT(R_USB_EPT_DATA, error_code, epid_data));
8916 +      }
8917 +    } else {
8918 +      tc_dbg("Ignoring In Bulk epid:%d, let RX interrupt handle it\n", epid);
8919 +    }
8920 +  }
8921 +
8922 +  local_irq_restore(flags);
8923 +}
8924 +
8925 +static void check_finished_ctrl_tx_epids(struct usb_hcd *hcd) {
8926 +  unsigned long flags;
8927 +  int epid;
8928 +  struct urb *urb;
8929 +  struct crisv10_urb_priv * urb_priv;
8930 +  __u32 epid_data;
8931 +
8932 +  /* Protect TxEPList */
8933 +  local_irq_save(flags);
8934 +
8935 +  for (epid = 0; epid < NBR_OF_EPIDS; epid++) {
8936 +    if(epid == DUMMY_EPID)
8937 +      continue;
8938 +
8939 +    /* A finished EP descriptor is disabled and has a valid sub pointer */
8940 +    if (!(TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)) &&
8941 +       (TxCtrlEPList[epid].sub != 0)) {
8942 +      
8943 +      /* Get the active URB for this epid */
8944 +      urb = activeUrbList[epid];
8945 +
8946 +      if(urb == NULL) {
8947 +       tc_warn("Found finished Ctrl epid:%d with no active URB\n", epid);
8948 +       continue;
8949 +      }
8950 +      
8951 +      /* Sanity checks */
8952 +      ASSERT(usb_pipein(urb->pipe));
8953 +      urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
8954 +      ASSERT(urb_priv);
8955 +      if (phys_to_virt(TxCtrlEPList[epid].sub) != urb_priv->last_sb) {
8956 +       tc_err("Endpoint got disabled before reaching last sb\n");
8957 +      }
8958 +
8959 +      epid_data = etrax_epid_get(epid);
8960 +      if (IO_EXTRACT(R_USB_EPT_DATA, error_code, epid_data) ==
8961 +         IO_STATE_VALUE(R_USB_EPT_DATA, error_code, no_error)) {
8962 +       /* This means that the endpoint has no error, is disabled
8963 +          and had inserted traffic, i.e. transfer successfully completed. */
8964 +
8965 +       /* Check if RX-interrupt for In Ctrl has been processed before
8966 +          finishing the URB */
8967 +       if(urb_priv->ctrl_rx_done) {
8968 +         tc_dbg("Finishing In Ctrl URB:0x%x[%d] in tx_interrupt\n",
8969 +                (unsigned int)urb, urb_priv->urb_num);
8970 +         tc_finish_urb(hcd, urb, 0);
8971 +       } else {
8972 +         /* If we get zout descriptor interrupt before RX was done for a
8973 +            In Ctrl transfer, then we flag that and it will be finished
8974 +            in the RX-Interrupt */
8975 +         urb_priv->ctrl_zout_done = 1;
8976 +         tc_dbg("Got zout descr interrupt before RX interrupt\n");
8977 +       }
8978 +      } else {
8979 +       /* Shouldn't happen. We expect errors to be caught by epid
8980 +          attention. */
8981 +       tc_err("Found disabled Ctrl EP desc (epid:%d URB:0x%x[%d]) error_code:%d\n", epid, (unsigned int)urb, urb_priv->urb_num, IO_EXTRACT(R_USB_EPT_DATA, error_code, epid_data));
8982 +       __dump_ep_desc(&(TxCtrlEPList[epid]));
8983 +       __dump_ept_data(epid);
8984 +      }      
8985 +    }
8986 +  }
8987 +  local_irq_restore(flags);
8988 +}
8989 +
8990 +/* This function goes through all epids that are setup for Out Isoc transfers
8991 +   and marks (isoc_out_done) all queued URBs that the DMA has finished
8992 +   transfer for.
8993 +   No URB completion is done here to make interrupt routine return quickly.
8994 +   URBs are completed later with help of complete_isoc_bottom_half() that
8995 +   is scheduled when this function is finished. */
8996 +static void check_finished_isoc_tx_epids(void) {
8997 +  unsigned long flags;
8998 +  int epid;
8999 +  struct urb *urb;
9000 +  struct crisv10_urb_priv * urb_priv;
9001 +  struct USB_SB_Desc* sb_desc;
9002 +  int epid_done;
9003 +
9004 +  /* Protect TxIsocEPList */
9005 +  local_irq_save(flags);
9006 +
9007 +  for (epid = 0; epid < NBR_OF_EPIDS; epid++) {
9008 +    if (TxIsocEPList[epid].sub == 0 || epid == INVALID_EPID ||
9009 +       !epid_out_traffic(epid)) {
9010 +      /* Nothing here to see. */
9011 +      continue;
9012 +    }
9013 +    ASSERT(epid_inuse(epid));
9014 +    ASSERT(epid_isoc(epid));
9015 +
9016 +    sb_desc = phys_to_virt(TxIsocEPList[epid].sub);
9017 +    /* Find the last descriptor of the currently active URB for this ep.
9018 +       This is the first descriptor in the sub list marked for a descriptor
9019 +       interrupt. */
9020 +    while (sb_desc && !IO_EXTRACT(USB_SB_command, intr, sb_desc->command)) {
9021 +      sb_desc = sb_desc->next ? phys_to_virt(sb_desc->next) : 0;
9022 +    }
9023 +    ASSERT(sb_desc);
9024 +
9025 +    isoc_dbg("Descr IRQ checking epid:%d sub:0x%x intr:0x%x\n",
9026 +            epid, (unsigned int)phys_to_virt(TxIsocEPList[epid].sub),
9027 +            (unsigned int)sb_desc);
9028 +
9029 +    urb = activeUrbList[epid];
9030 +    if(urb == NULL) {
9031 +      isoc_err("Isoc Descr irq on epid:%d with no active URB\n", epid);
9032 +      continue;
9033 +    }
9034 +
9035 +    epid_done = 0;
9036 +    while(urb && !epid_done) {
9037 +      /* Sanity check. */
9038 +      ASSERT(usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS);
9039 +      ASSERT(usb_pipeout(urb->pipe));
9040 +      
9041 +      urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
9042 +      ASSERT(urb_priv);
9043 +      ASSERT(urb_priv->urb_state == STARTED ||
9044 +            urb_priv->urb_state == UNLINK);
9045 +      
9046 +      if (sb_desc != urb_priv->last_sb) {
9047 +       /* This urb has been sent. */
9048 +       urb_priv->isoc_out_done = 1;
9049 +
9050 +      } else { /* Found URB that has last_sb as the interrupt reason */
9051 +
9052 +       /* Check if EP has been disabled, meaning that all transfers are done*/
9053 +       if(!(TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable))) {
9054 +         ASSERT((sb_desc->command & IO_MASK(USB_SB_command, eol)) ==
9055 +                IO_STATE(USB_SB_command, eol, yes));
9056 +         ASSERT(sb_desc->next == 0);
9057 +         urb_priv->isoc_out_done = 1;
9058 +       } else {
9059 +         isoc_dbg("Skipping URB:0x%x[%d] because EP not disabled yet\n",
9060 +                  (unsigned int)urb, urb_priv->urb_num);
9061 +       }
9062 +       /* Stop looking any further in queue */
9063 +       epid_done = 1;  
9064 +      }
9065 +
9066 +      if (!epid_done) {
9067 +       if(urb == activeUrbList[epid]) {
9068 +         urb = urb_list_first(epid);
9069 +       } else {
9070 +         urb = urb_list_next(urb, epid);
9071 +       }
9072 +      }
9073 +    } /* END: while(urb && !epid_done) */
9074 +  }
9075 +
9076 +  local_irq_restore(flags);
9077 +}
9078 +
9079 +
9080 +/* This is where the Out Isoc URBs are really completed. This function is
9081 +   scheduled from tc_dma_tx_interrupt() when one or more Out Isoc transfers
9082 +   are done. This functions completes all URBs earlier marked with
9083 +   isoc_out_done by fast interrupt routine check_finished_isoc_tx_epids() */
9084 +
9085 +static void complete_isoc_bottom_half(void *data) {
9086 +  struct crisv10_isoc_complete_data *comp_data;
9087 +  struct usb_iso_packet_descriptor *packet;
9088 +  struct crisv10_urb_priv * urb_priv;
9089 +  unsigned long flags;
9090 +  struct urb* urb;
9091 +  int epid_done;
9092 +  int epid;
9093 +  int i;
9094 +
9095 +  comp_data = (struct crisv10_isoc_complete_data*)data;
9096 +
9097 +  local_irq_save(flags);
9098 +
9099 +  for (epid = 0; epid < NBR_OF_EPIDS - 1; epid++) {
9100 +    if(!epid_inuse(epid) || !epid_isoc(epid) || !epid_out_traffic(epid) || epid == DUMMY_EPID) {
9101 +      /* Only check valid Out Isoc epids */
9102 +      continue;
9103 +    }
9104 +
9105 +    isoc_dbg("Isoc bottom-half checking epid:%d, sub:0x%x\n", epid,
9106 +            (unsigned int)phys_to_virt(TxIsocEPList[epid].sub));
9107 +
9108 +    /* The descriptor interrupt handler has marked all transmitted Out Isoc
9109 +       URBs with isoc_out_done.  Now we traverse all epids and for all that
9110 +       have out Isoc traffic we traverse its URB list and complete the
9111 +       transmitted URBs. */
9112 +    epid_done = 0;
9113 +    while (!epid_done) {
9114 +
9115 +      /* Get the active urb (if any) */
9116 +      urb = activeUrbList[epid];
9117 +      if (urb == 0) {
9118 +       isoc_dbg("No active URB on epid:%d anymore\n", epid);
9119 +       epid_done = 1;
9120 +       continue;
9121 +      }
9122 +
9123 +      /* Sanity check. */
9124 +      ASSERT(usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS);
9125 +      ASSERT(usb_pipeout(urb->pipe));
9126 +
9127 +      urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
9128 +      ASSERT(urb_priv);
9129 +
9130 +      if (!(urb_priv->isoc_out_done)) {
9131 +       /* We have reached a URB that isn't flagged done yet, stop traversing. */
9132 +       isoc_dbg("Stoped traversing Out Isoc URBs on epid:%d"
9133 +                " before not yet flaged URB:0x%x[%d]\n",
9134 +                epid, (unsigned int)urb, urb_priv->urb_num);
9135 +       epid_done = 1;
9136 +       continue;
9137 +      }
9138 +
9139 +      /* This urb has been sent. */
9140 +      isoc_dbg("Found URB:0x%x[%d] that is flaged isoc_out_done\n",
9141 +              (unsigned int)urb, urb_priv->urb_num);
9142 +
9143 +      /* Set ok on transferred packets for this URB and finish it */
9144 +      for (i = 0; i < urb->number_of_packets; i++) {
9145 +       packet = &urb->iso_frame_desc[i];
9146 +       packet->status = 0;
9147 +       packet->actual_length = packet->length;
9148 +      }
9149 +      urb_priv->isoc_packet_counter = urb->number_of_packets;
9150 +      tc_finish_urb(comp_data->hcd, urb, 0);
9151 +
9152 +    } /* END: while(!epid_done) */
9153 +  } /* END: for(epid...) */
9154 +
9155 +  local_irq_restore(flags);
9156 +  kmem_cache_free(isoc_compl_cache, comp_data);
9157 +}
9158 +
9159 +
9160 +static void check_finished_intr_tx_epids(struct usb_hcd *hcd) {
9161 +  unsigned long flags;
9162 +  int epid;
9163 +  struct urb *urb;
9164 +  struct crisv10_urb_priv * urb_priv;
9165 +  volatile struct USB_EP_Desc *curr_ep;   /* Current EP, the iterator. */
9166 +  volatile struct USB_EP_Desc *next_ep;   /* The EP after current. */
9167 +
9168 +  /* Protect TxintrEPList */
9169 +  local_irq_save(flags);
9170 +
9171 +  for (epid = 0; epid < NBR_OF_EPIDS; epid++) {
9172 +    if(!epid_inuse(epid) || !epid_intr(epid) || !epid_out_traffic(epid)) {
9173 +      /* Nothing to see on this epid. Only check valid Out Intr epids */
9174 +      continue;
9175 +    }
9176 +
9177 +    urb = activeUrbList[epid];
9178 +    if(urb == 0) {
9179 +      intr_warn("Found Out Intr epid:%d with no active URB\n", epid);
9180 +      continue;
9181 +    }
9182 +
9183 +    /* Sanity check. */
9184 +    ASSERT(usb_pipetype(urb->pipe) == PIPE_INTERRUPT);
9185 +    ASSERT(usb_pipeout(urb->pipe));
9186 +    
9187 +    urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
9188 +    ASSERT(urb_priv);
9189 +
9190 +    /* Go through EPs between first and second sof-EP. It's here Out Intr EPs
9191 +       are inserted.*/
9192 +    curr_ep = &TxIntrEPList[0];
9193 +    do {
9194 +      next_ep = (struct USB_EP_Desc *)phys_to_virt(curr_ep->next);
9195 +      if(next_ep == urb_priv->intr_ep_pool[0]) {
9196 +       /* We found the Out Intr EP for this epid */
9197 +       
9198 +       /* Disable it so it doesn't get processed again */
9199 +       next_ep->command &= ~IO_MASK(USB_EP_command, enable);
9200 +
9201 +       /* Finish the active Out Intr URB with status OK */
9202 +       tc_finish_urb(hcd, urb, 0);
9203 +      }
9204 +      curr_ep = phys_to_virt(curr_ep->next);
9205 +    } while (curr_ep != &TxIntrEPList[1]);
9206 +
9207 +  }
9208 +  local_irq_restore(flags);
9209 +}
9210 +
9211 +/* Interrupt handler for DMA8/IRQ24 with subchannels (called from hardware intr) */
9212 +static irqreturn_t tc_dma_tx_interrupt(int irq, void *vhc) {
9213 +  struct usb_hcd *hcd = (struct usb_hcd*)vhc;
9214 +  ASSERT(hcd);
9215 +
9216 +  if (*R_IRQ_READ2 & IO_MASK(R_IRQ_READ2, dma8_sub0_descr)) {
9217 +    /* Clear this interrupt */
9218 +    *R_DMA_CH8_SUB0_CLR_INTR = IO_STATE(R_DMA_CH8_SUB0_CLR_INTR, clr_descr, do);
9219 +    restart_dma8_sub0();
9220 +  }
9221 +
9222 +  if (*R_IRQ_READ2 & IO_MASK(R_IRQ_READ2, dma8_sub1_descr)) {
9223 +    /* Clear this interrupt */
9224 +    *R_DMA_CH8_SUB1_CLR_INTR = IO_STATE(R_DMA_CH8_SUB1_CLR_INTR, clr_descr, do);
9225 +    check_finished_ctrl_tx_epids(hcd);
9226 +  }
9227 +
9228 +  if (*R_IRQ_READ2 & IO_MASK(R_IRQ_READ2, dma8_sub2_descr)) {
9229 +    /* Clear this interrupt */
9230 +    *R_DMA_CH8_SUB2_CLR_INTR = IO_STATE(R_DMA_CH8_SUB2_CLR_INTR, clr_descr, do);
9231 +    check_finished_intr_tx_epids(hcd);
9232 +  }
9233 +
9234 +  if (*R_IRQ_READ2 & IO_MASK(R_IRQ_READ2, dma8_sub3_descr)) {
9235 +    struct crisv10_isoc_complete_data* comp_data;
9236 +
9237 +    /* Flag done Out Isoc for later completion */
9238 +    check_finished_isoc_tx_epids();
9239 +
9240 +    /* Clear this interrupt */
9241 +    *R_DMA_CH8_SUB3_CLR_INTR = IO_STATE(R_DMA_CH8_SUB3_CLR_INTR, clr_descr, do);
9242 +    /* Schedule bottom half of Out Isoc completion function. This function
9243 +       finishes the URBs marked with isoc_out_done */
9244 +    comp_data = (struct crisv10_isoc_complete_data*)
9245 +      kmem_cache_alloc(isoc_compl_cache, SLAB_ATOMIC);
9246 +    ASSERT(comp_data != NULL);
9247 +    comp_data ->hcd = hcd;
9248 +
9249 +    INIT_WORK(&comp_data->usb_bh, complete_isoc_bottom_half, comp_data);
9250 +    schedule_work(&comp_data->usb_bh);
9251 +  }
9252 +
9253 +  return IRQ_HANDLED;
9254 +}
9255 +
9256 +/* Interrupt handler for DMA9/IRQ25 (called from hardware intr) */
9257 +static irqreturn_t tc_dma_rx_interrupt(int irq, void *vhc) {
9258 +  unsigned long flags;
9259 +  struct urb *urb;
9260 +  struct usb_hcd *hcd = (struct usb_hcd*)vhc;
9261 +  struct crisv10_urb_priv *urb_priv;
9262 +  int epid = 0;
9263 +  int real_error;
9264 +
9265 +  ASSERT(hcd);
9266 +
9267 +  /* Clear this interrupt. */
9268 +  *R_DMA_CH9_CLR_INTR = IO_STATE(R_DMA_CH9_CLR_INTR, clr_eop, do);
9269 +
9270 +  /* Custom clear interrupt for this interrupt */
9271 +  /* The reason we cli here is that we call the driver's callback functions. */
9272 +  local_irq_save(flags);
9273 +
9274 +  /* Note that this while loop assumes that all packets span only
9275 +     one rx descriptor. */
9276 +  while(myNextRxDesc->status & IO_MASK(USB_IN_status, eop)) {
9277 +    epid = IO_EXTRACT(USB_IN_status, epid, myNextRxDesc->status);
9278 +    /* Get the active URB for this epid */
9279 +    urb = activeUrbList[epid];
9280 +
9281 +    ASSERT(epid_inuse(epid));
9282 +    if (!urb) {
9283 +      dma_err("No urb for epid %d in rx interrupt\n", epid);
9284 +      goto skip_out;
9285 +    }
9286 +
9287 +    /* Check if any errors on epid */
9288 +    real_error = 0;
9289 +    if (myNextRxDesc->status & IO_MASK(USB_IN_status, error)) {
9290 +      __u32 r_usb_ept_data;
9291 +
9292 +      if (usb_pipeisoc(urb->pipe)) {
9293 +       r_usb_ept_data = etrax_epid_iso_get(epid);
9294 +       if((r_usb_ept_data & IO_MASK(R_USB_EPT_DATA_ISO, valid)) &&
9295 +          (IO_EXTRACT(R_USB_EPT_DATA_ISO, error_code, r_usb_ept_data) == 0) &&
9296 +          (myNextRxDesc->status & IO_MASK(USB_IN_status, nodata))) {
9297 +         /* Not an error, just a failure to receive an expected iso
9298 +            in packet in this frame.  This is not documented
9299 +            in the designers reference. Continue processing.
9300 +         */
9301 +       } else real_error = 1;
9302 +      } else real_error = 1;
9303 +    }
9304 +
9305 +    if(real_error) {
9306 +      dma_err("Error in RX descr on epid:%d for URB 0x%x",
9307 +             epid, (unsigned int)urb);
9308 +      dump_ept_data(epid);
9309 +      dump_in_desc(myNextRxDesc);
9310 +      goto skip_out;
9311 +    }
9312 +
9313 +    urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
9314 +    ASSERT(urb_priv);
9315 +    ASSERT(urb_priv->urb_state == STARTED ||
9316 +          urb_priv->urb_state == UNLINK);
9317 +
9318 +    if ((usb_pipetype(urb->pipe) == PIPE_BULK) ||
9319 +       (usb_pipetype(urb->pipe) == PIPE_CONTROL) ||
9320 +       (usb_pipetype(urb->pipe) == PIPE_INTERRUPT)) {
9321 +
9322 +      /* We get nodata for empty data transactions, and the rx descriptor's
9323 +        hw_len field is not valid in that case. No data to copy in other
9324 +        words. */
9325 +      if (myNextRxDesc->status & IO_MASK(USB_IN_status, nodata)) {
9326 +       /* No data to copy */
9327 +      } else {
9328 +       /*
9329 +       dma_dbg("Processing RX for URB:0x%x epid:%d (data:%d ofs:%d)\n",
9330 +               (unsigned int)urb, epid, myNextRxDesc->hw_len,
9331 +               urb_priv->rx_offset);
9332 +       */
9333 +       /* Only copy data if URB isn't flagged to be unlinked */
9334 +       if(urb_priv->urb_state != UNLINK) {
9335 +         /* Make sure the data fits in the buffer. */
9336 +         if(urb_priv->rx_offset + myNextRxDesc->hw_len
9337 +            <= urb->transfer_buffer_length) {
9338 +
9339 +           /* Copy the data to URBs buffer */
9340 +           memcpy(urb->transfer_buffer + urb_priv->rx_offset,
9341 +                  phys_to_virt(myNextRxDesc->buf), myNextRxDesc->hw_len);
9342 +           urb_priv->rx_offset += myNextRxDesc->hw_len;
9343 +         } else {
9344 +           /* Signal overflow when returning URB */
9345 +           urb->status = -EOVERFLOW;
9346 +           tc_finish_urb_later(hcd, urb, urb->status);
9347 +         }
9348 +       }
9349 +      }
9350 +
9351 +      /* Check if it was the last packet in the transfer */
9352 +      if (myNextRxDesc->status & IO_MASK(USB_IN_status, eot)) {
9353 +       /* Special handling for In Ctrl URBs. */
9354 +       if(usb_pipecontrol(urb->pipe) && usb_pipein(urb->pipe) &&
9355 +          !(urb_priv->ctrl_zout_done)) {
9356 +         /* Flag that RX part of Ctrl transfer is done. Because zout descr
9357 +            interrupt hasn't happened yet, the URB will be finished in the
9358 +            TX-Interrupt. */
9359 +         urb_priv->ctrl_rx_done = 1;
9360 +         tc_dbg("Not finishing In Ctrl URB:0x%x from rx_interrupt, waiting"
9361 +                " for zout\n", (unsigned int)urb);
9362 +       } else {
9363 +         tc_finish_urb(hcd, urb, 0);
9364 +       }
9365 +      }
9366 +    } else { /* ISOC RX */
9367 +      /*
9368 +      isoc_dbg("Processing RX for epid:%d (URB:0x%x) ISOC pipe\n",
9369 +              epid, (unsigned int)urb);
9370 +      */
9371 +
9372 +      struct usb_iso_packet_descriptor *packet;
9373 +
9374 +      if (urb_priv->urb_state == UNLINK) {
9375 +       isoc_warn("Ignoring Isoc Rx data for urb being unlinked.\n");
9376 +       goto skip_out;
9377 +      } else if (urb_priv->urb_state == NOT_STARTED) {
9378 +       isoc_err("What? Got Rx data for Isoc urb that isn't started?\n");
9379 +       goto skip_out;
9380 +      }
9381 +
9382 +      packet = &urb->iso_frame_desc[urb_priv->isoc_packet_counter];
9383 +      ASSERT(packet);
9384 +      packet->status = 0;
9385 +
9386 +      if (myNextRxDesc->status & IO_MASK(USB_IN_status, nodata)) {
9387 +       /* We get nodata for empty data transactions, and the rx descriptor's
9388 +          hw_len field is not valid in that case. We copy 0 bytes however to
9389 +          stay in synch. */
9390 +       packet->actual_length = 0;
9391 +      } else {
9392 +       packet->actual_length = myNextRxDesc->hw_len;
9393 +       /* Make sure the data fits in the buffer. */
9394 +       ASSERT(packet->actual_length <= packet->length);
9395 +       memcpy(urb->transfer_buffer + packet->offset,
9396 +              phys_to_virt(myNextRxDesc->buf), packet->actual_length);
9397 +       if(packet->actual_length > 0)
9398 +         isoc_dbg("Copied %d bytes, packet %d for URB:0x%x[%d]\n",
9399 +                  packet->actual_length, urb_priv->isoc_packet_counter,
9400 +                  (unsigned int)urb, urb_priv->urb_num);
9401 +      }
9402 +
9403 +      /* Increment the packet counter. */
9404 +      urb_priv->isoc_packet_counter++;
9405 +
9406 +      /* Note that we don't care about the eot field in the rx descriptor's
9407 +        status. It will always be set for isoc traffic. */
9408 +      if (urb->number_of_packets == urb_priv->isoc_packet_counter) {
9409 +       /* Complete the urb with status OK. */
9410 +       tc_finish_urb(hcd, urb, 0);
9411 +      }
9412 +    }
9413 +
9414 +  skip_out:
9415 +    myNextRxDesc->status = 0;
9416 +    myNextRxDesc->command |= IO_MASK(USB_IN_command, eol);
9417 +    myLastRxDesc->command &= ~IO_MASK(USB_IN_command, eol);
9418 +    myLastRxDesc = myNextRxDesc;
9419 +    myNextRxDesc = phys_to_virt(myNextRxDesc->next);
9420 +    flush_etrax_cache();
9421 +    *R_DMA_CH9_CMD = IO_STATE(R_DMA_CH9_CMD, cmd, restart);
9422 +  }
9423 +
9424 +  local_irq_restore(flags);
9425 +
9426 +  return IRQ_HANDLED;
9427 +}
9428 +
9429 +static void tc_bulk_start_timer_func(unsigned long dummy) {
9430 +  /* We might enable an EP descriptor behind the current DMA position when
9431 +     it's about to decide that there is no more bulk traffic and it should
9432 +     stop the bulk channel.
9433 +     Therefore we periodically check if the bulk channel is stopped and there
9434 +     is an enabled bulk EP descriptor, in which case we start the bulk
9435 +     channel. */
9436 +  
9437 +  if (!(*R_DMA_CH8_SUB0_CMD & IO_MASK(R_DMA_CH8_SUB0_CMD, cmd))) {
9438 +    int epid;
9439 +
9440 +    timer_dbg("bulk_start_timer: Bulk DMA channel not running.\n");
9441 +
9442 +    for (epid = 0; epid < NBR_OF_EPIDS; epid++) {
9443 +      if (TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
9444 +       timer_warn("Found enabled EP for epid %d, starting bulk channel.\n",
9445 +                  epid);
9446 +       restart_dma8_sub0();
9447 +
9448 +       /* Restart the bulk eot timer since we just started the bulk channel.*/
9449 +       mod_timer(&bulk_eot_timer, jiffies + BULK_EOT_TIMER_INTERVAL);
9450 +
9451 +       /* No need to search any further. */
9452 +       break;
9453 +      }
9454 +    }
9455 +  } else {
9456 +    timer_dbg("bulk_start_timer: Bulk DMA channel running.\n");
9457 +  }
9458 +}
9459 +
9460 +static void tc_bulk_eot_timer_func(unsigned long dummy) {
9461 +  struct usb_hcd *hcd = (struct usb_hcd*)dummy;
9462 +  ASSERT(hcd);
9463 +  /* Because of a race condition in the top half, we might miss a bulk eot.
9464 +     This timer "simulates" a bulk eot if we don't get one for a while,
9465 +     hopefully correcting the situation. */
9466 +  timer_dbg("bulk_eot_timer timed out.\n");
9467 +  check_finished_bulk_tx_epids(hcd, 1);
9468 +}
9469 +
9470 +
9471 +/*************************************************************/
9472 +/*************************************************************/
9473 +/* Device driver block                                       */
9474 +/*************************************************************/
9475 +/*************************************************************/
9476 +
9477 +/* Forward declarations for device driver functions */
9478 +static int devdrv_hcd_probe(struct device *);
9479 +static int devdrv_hcd_remove(struct device *);
9480 +#ifdef CONFIG_PM
9481 +static int devdrv_hcd_suspend(struct device *, u32, u32);
9482 +static int devdrv_hcd_resume(struct device *, u32);
9483 +#endif /* CONFIG_PM */
9484 +
9485 +/* the device */
9486 +static struct platform_device *devdrv_hc_platform_device;
9487 +
9488 +/* device driver interface */
9489 +static struct device_driver devdrv_hc_device_driver = {
9490 +  .name =                      (char *) hc_name,
9491 +  .bus =                       &platform_bus_type,
9492 +
9493 +  .probe =             devdrv_hcd_probe,
9494 +  .remove =            devdrv_hcd_remove,
9495 +
9496 +#ifdef CONFIG_PM
9497 +  .suspend =           devdrv_hcd_suspend,
9498 +  .resume =            devdrv_hcd_resume,
9499 +#endif /* CONFIG_PM */
9500 +};
9501  
9502 -       CHECK_ALIGN(&TxIsocEPList[i]);
9503 -       TxIsocEPList[i].hw_len = 0;
9504 -
9505 -       /* Must enable the last EP descr to get eof interrupt. */
9506 -       TxIsocEPList[i].command = (IO_STATE(USB_EP_command, enable, yes) |
9507 -                                  IO_STATE(USB_EP_command, eof, yes) |
9508 -                                  IO_STATE(USB_EP_command, eol, yes) |
9509 -                                  IO_FIELD(USB_EP_command, epid, INVALID_EPID));
9510 -       TxIsocEPList[i].sub = virt_to_phys(&TxIsocSB_zout);
9511 -       TxIsocEPList[i].next = virt_to_phys(&TxIsocEPList[0]);
9512 -
9513 -       *R_DMA_CH8_SUB3_EP = virt_to_phys(&TxIsocEPList[0]);
9514 -       *R_DMA_CH8_SUB3_CMD = IO_STATE(R_DMA_CH8_SUB3_CMD, cmd, start);
9515 -
9516 -       DBFEXIT;
9517 -}
9518 -
9519 -static void etrax_usb_unlink_intr_urb(struct urb *urb)
9520 +/* initialize the host controller and driver  */
9521 +static int __init_or_module devdrv_hcd_probe(struct device *dev)
9522  {
9523 -       volatile USB_EP_Desc_t *first_ep;  /* First EP in the list. */
9524 -       volatile USB_EP_Desc_t *curr_ep;   /* Current EP, the iterator. */
9525 -       volatile USB_EP_Desc_t *next_ep;   /* The EP after current. */
9526 -       volatile USB_EP_Desc_t *unlink_ep; /* The one we should remove from the list. */
9527 -
9528 -       int epid;
9529 -
9530 -       /* Read 8.8.4 in Designer's Reference, "Removing an EP Descriptor from the List". */
9531 -
9532 -       DBFENTER;
9533 -
9534 -       epid = ((etrax_urb_priv_t *)urb->hcpriv)->epid;
9535 -
9536 -       first_ep = &TxIntrEPList[0];
9537 -       curr_ep = first_ep;
9538 -
9539 -
9540 -       /* Note that this loop removes all EP descriptors with this epid. This assumes
9541 -          that all EP descriptors belong to the one and only urb for this epid. */
9542 -
9543 -       do {
9544 -               next_ep = (USB_EP_Desc_t *)phys_to_virt(curr_ep->next);
9545 -
9546 -               if (IO_EXTRACT(USB_EP_command, epid, next_ep->command) == epid) {
9547 -
9548 -                       dbg_intr("Found EP to unlink for epid %d", epid);
9549 -
9550 -                       /* This is the one we should unlink. */
9551 -                       unlink_ep = next_ep;
9552 -
9553 -                       /* Actually unlink the EP from the DMA list. */
9554 -                       curr_ep->next = unlink_ep->next;
9555 -
9556 -                       /* Wait until the DMA is no longer at this descriptor. */
9557 -                       while (*R_DMA_CH8_SUB2_EP == virt_to_phys(unlink_ep));
9558 +  struct usb_hcd *hcd;
9559 +  struct crisv10_hcd *crisv10_hcd;
9560 +  int retval;
9561 +
9562 +  /* Check DMA burst length */
9563 +  if(IO_EXTRACT(R_BUS_CONFIG, dma_burst, *R_BUS_CONFIG) !=
9564 +     IO_STATE(R_BUS_CONFIG, dma_burst, burst32)) {
9565 +    devdrv_err("Invalid DMA burst length in Etrax 100LX,"
9566 +              " needs to be 32\n");
9567 +    return -EPERM;
9568 +  }
9569 +
9570 +  hcd = usb_create_hcd(&crisv10_hc_driver, dev, dev->bus_id);
9571 +  if (!hcd)
9572 +    return -ENOMEM;
9573 +
9574 +  crisv10_hcd = hcd_to_crisv10_hcd(hcd);
9575 +  spin_lock_init(&crisv10_hcd->lock);
9576 +  crisv10_hcd->num_ports = num_ports();
9577 +  crisv10_hcd->running = 0;
9578 +
9579 +  dev_set_drvdata(dev, crisv10_hcd);
9580 +
9581 +  devdrv_dbg("ETRAX USB IRQs HC:%d  RX:%d  TX:%d\n", ETRAX_USB_HC_IRQ,
9582 +         ETRAX_USB_RX_IRQ, ETRAX_USB_TX_IRQ);
9583 +
9584 +  /* Print out chip version read from registers */
9585 +  int rev_maj = *R_USB_REVISION & IO_MASK(R_USB_REVISION, major);
9586 +  int rev_min = *R_USB_REVISION & IO_MASK(R_USB_REVISION, minor);
9587 +  if(rev_min == 0) {
9588 +    devdrv_info("Etrax 100LX USB Revision %d v1,2\n", rev_maj);
9589 +  } else {
9590 +    devdrv_info("Etrax 100LX USB Revision %d v%d\n", rev_maj, rev_min);
9591 +  }
9592 +
9593 +  devdrv_info("Bulk timer interval, start:%d eot:%d\n",
9594 +             BULK_START_TIMER_INTERVAL,
9595 +             BULK_EOT_TIMER_INTERVAL);
9596 +
9597 +
9598 +  /* Init root hub data structures */
9599 +  if(rh_init()) {
9600 +    devdrv_err("Failed init data for Root Hub\n");
9601 +    retval = -ENOMEM;
9602 +  }
9603 +
9604 +  if(port_in_use(0)) {
9605 +    if (cris_request_io_interface(if_usb_1, "ETRAX100LX USB-HCD")) {
9606 +      printk(KERN_CRIT "usb-host: request IO interface usb1 failed");
9607 +      retval = -EBUSY;
9608 +      goto out;
9609 +    }
9610 +    devdrv_info("Claimed interface for USB physical port 1\n");
9611 +  }
9612 +  if(port_in_use(1)) {
9613 +    if (cris_request_io_interface(if_usb_2, "ETRAX100LX USB-HCD")) {
9614 +      /* Free first interface if second failed to be claimed */
9615 +      if(port_in_use(0)) {
9616 +       cris_free_io_interface(if_usb_1);
9617 +      }
9618 +      printk(KERN_CRIT "usb-host: request IO interface usb2 failed");
9619 +      retval = -EBUSY;
9620 +      goto out;
9621 +    }
9622 +    devdrv_info("Claimed interface for USB physical port 2\n");
9623 +  }
9624 +  
9625 +  /* Init transfer controller structs and locks */
9626 +  if((retval = tc_init(hcd)) != 0) {
9627 +    goto out;
9628 +  }
9629 +
9630 +  /* Attach interrupt functions for DMA and init DMA controller */
9631 +  if((retval = tc_dma_init(hcd)) != 0) {
9632 +    goto out;
9633 +  }
9634 +
9635 +  /* Attach the top IRQ handler for USB controller interrupts */
9636 +  if (request_irq(ETRAX_USB_HC_IRQ, crisv10_hcd_top_irq, 0,
9637 +                 "ETRAX 100LX built-in USB (HC)", hcd)) {
9638 +    err("Could not allocate IRQ %d for USB", ETRAX_USB_HC_IRQ);
9639 +    retval = -EBUSY;
9640 +    goto out;
9641 +  }
9642 +
9643 +  /* iso_eof is only enabled when isoc traffic is running. */
9644 +  *R_USB_IRQ_MASK_SET =
9645 +    /* IO_STATE(R_USB_IRQ_MASK_SET, iso_eof, set) | */
9646 +    IO_STATE(R_USB_IRQ_MASK_SET, bulk_eot, set) |
9647 +    IO_STATE(R_USB_IRQ_MASK_SET, epid_attn, set) |
9648 +    IO_STATE(R_USB_IRQ_MASK_SET, port_status, set) |
9649 +    IO_STATE(R_USB_IRQ_MASK_SET, ctl_status, set);
9650 +
9651 +
9652 +  crisv10_ready_wait();
9653 +  /* Reset the USB interface. */
9654 +  *R_USB_COMMAND =
9655 +    IO_STATE(R_USB_COMMAND, port_sel, nop) |
9656 +    IO_STATE(R_USB_COMMAND, port_cmd, reset) |
9657 +    IO_STATE(R_USB_COMMAND, ctrl_cmd, reset);
9658 +
9659 +  /* Designer's Reference, p. 8 - 10 says we should initiate R_USB_FM_PSTART to
9660 +     0x2A30 (10800), to guarantee that control traffic gets 10% of the
9661 +     bandwidth, and periodic transfer may allocate the rest (90%).
9662 +     This doesn't work though.
9663 +     The value 11960 is chosen to be just after the SOF token, with a couple
9664 +     of bit times extra for possible bit stuffing. */
9665 +  *R_USB_FM_PSTART = IO_FIELD(R_USB_FM_PSTART, value, 11960);
9666 +
9667 +  crisv10_ready_wait();
9668 +  /* Configure the USB interface as a host controller. */
9669 +  *R_USB_COMMAND =
9670 +    IO_STATE(R_USB_COMMAND, port_sel, nop) |
9671 +    IO_STATE(R_USB_COMMAND, port_cmd, reset) |
9672 +    IO_STATE(R_USB_COMMAND, ctrl_cmd, host_config);
9673 +
9674 +
9675 +  /* Check so controller not busy before enabling ports */
9676 +  crisv10_ready_wait();
9677 +
9678 +  /* Enable selected USB ports */
9679 +  if(port_in_use(0)) {
9680 +    *R_USB_PORT1_DISABLE = IO_STATE(R_USB_PORT1_DISABLE, disable, no);
9681 +  } else {
9682 +    *R_USB_PORT1_DISABLE = IO_STATE(R_USB_PORT1_DISABLE, disable, yes);
9683 +  }
9684 +  if(port_in_use(1)) {
9685 +    *R_USB_PORT2_DISABLE = IO_STATE(R_USB_PORT2_DISABLE, disable, no);
9686 +  } else {
9687 +    *R_USB_PORT2_DISABLE = IO_STATE(R_USB_PORT2_DISABLE, disable, yes);
9688 +  }
9689 +
9690 +  crisv10_ready_wait();
9691 +  /* Start processing of USB traffic. */
9692 +  *R_USB_COMMAND =
9693 +    IO_STATE(R_USB_COMMAND, port_sel, nop) |
9694 +    IO_STATE(R_USB_COMMAND, port_cmd, reset) |
9695 +    IO_STATE(R_USB_COMMAND, ctrl_cmd, host_run);
9696 +
9697 +  /* Do not continue probing initialization before USB interface is done */
9698 +  crisv10_ready_wait();
9699 +
9700 +  /* Register our Host Controller to USB Core
9701 +   * Finish the remaining parts of generic HCD initialization: allocate the
9702 +   * buffers of consistent memory, register the bus
9703 +   * and call the driver's reset() and start() routines. */
9704 +  retval = usb_add_hcd(hcd, ETRAX_USB_HC_IRQ, IRQF_DISABLED);
9705 +  if (retval != 0) {
9706 +    devdrv_err("Failed registering HCD driver\n");
9707 +    goto out;
9708 +  }
9709 +
9710 +  return 0;
9711 +
9712 + out:
9713 +  devdrv_hcd_remove(dev);
9714 +  return retval;
9715 +}
9716 +
9717 +
9718 +/* cleanup after the host controller and driver */
9719 +static int __init_or_module devdrv_hcd_remove(struct device *dev)
9720 +{
9721 +  struct crisv10_hcd *crisv10_hcd = dev_get_drvdata(dev);
9722 +  struct usb_hcd *hcd;
9723 +
9724 +  if (!crisv10_hcd)
9725 +    return 0;
9726 +  hcd = crisv10_hcd_to_hcd(crisv10_hcd);
9727 +
9728 +
9729 +  /* Stop USB Controller in Etrax 100LX */
9730 +  crisv10_hcd_reset(hcd);
9731 +
9732 +  usb_remove_hcd(hcd);
9733 +  devdrv_dbg("Removed HCD from USB Core\n");
9734 +
9735 +  /* Free USB Controller IRQ */
9736 +  free_irq(ETRAX_USB_HC_IRQ, NULL);
9737 +
9738 +  /* Free resources */
9739 +  tc_dma_destroy();
9740 +  tc_destroy();
9741 +
9742 +
9743 +  if(port_in_use(0)) {
9744 +    cris_free_io_interface(if_usb_1);
9745 +  }
9746 +  if(port_in_use(1)) {
9747 +    cris_free_io_interface(if_usb_2);
9748 +  }
9749 +
9750 +  devdrv_dbg("Freed all claimed resources\n");
9751 +
9752 +  return 0;
9753 +}
9754 +
9755 +
9756 +#ifdef CONFIG_PM
9757 +
9758 +static int devdrv_hcd_suspend(struct usb_hcd *hcd, u32 state, u32 level)
9759 +{
9760 +  return 0; /* no-op for now */
9761 +}
9762 +
9763 +static int devdrv_hcd_resume(struct usb_hcd *hcd, u32 level)
9764 +{
9765 +  return 0; /* no-op for now */
9766 +}
9767 +
9768 +#endif /* CONFIG_PM */
9769 +
9770 +
9771 +
9772 +/*************************************************************/
9773 +/*************************************************************/
9774 +/* Module block                                              */
9775 +/*************************************************************/
9776 +/*************************************************************/
9777
9778 +/* register driver */
9779 +static int __init module_hcd_init(void) 
9780 +{
9781 +  
9782 +  if (usb_disabled())
9783 +    return -ENODEV;
9784 +
9785 +  /* Here we select enabled ports by following defines created from
9786 +     menuconfig */
9787 +#ifndef CONFIG_ETRAX_USB_HOST_PORT1
9788 +  ports &= ~(1<<0);
9789 +#endif
9790 +#ifndef CONFIG_ETRAX_USB_HOST_PORT2
9791 +  ports &= ~(1<<1);
9792 +#endif
9793  
9794 -                       /* Now we are free to remove it and its SB descriptor.
9795 -                          Note that it is assumed here that there is only one sb in the
9796 -                          sb list for this ep. */
9797 -                       kmem_cache_free(usb_desc_cache, phys_to_virt(unlink_ep->sub));
9798 -                       kmem_cache_free(usb_desc_cache, (USB_EP_Desc_t *)unlink_ep);
9799 -               }
9800 +  printk(KERN_INFO "%s version "VERSION" "COPYRIGHT"\n", product_desc);
9801  
9802 -               curr_ep = phys_to_virt(curr_ep->next);
9803 +  devdrv_hc_platform_device =
9804 +    platform_device_register_simple((char *) hc_name, 0, NULL, 0);
9805  
9806 -       } while (curr_ep != first_ep);
9807 -        urb->hcpriv = NULL;
9808 +  if (IS_ERR(devdrv_hc_platform_device))
9809 +    return PTR_ERR(devdrv_hc_platform_device);
9810 +  return driver_register(&devdrv_hc_device_driver);
9811 +  /* 
9812 +   * Note that we do not set the DMA mask for the device,
9813 +   * i.e. we pretend that we will use PIO, since no specific
9814 +   * allocation routines are needed for DMA buffers. This will
9815 +   * cause the HCD buffer allocation routines to fall back to
9816 +   * kmalloc().
9817 +   */
9818  }
9819  
9820 -void etrax_usb_do_intr_recover(int epid)
9821 -{
9822 -       USB_EP_Desc_t *first_ep, *tmp_ep;
9823 +/* unregister driver */
9824 +static void __exit module_hcd_exit(void) 
9825 +{      
9826 +  driver_unregister(&devdrv_hc_device_driver);
9827 +}
9828  
9829 -       DBFENTER;
9830 -
9831 -       first_ep = (USB_EP_Desc_t *)phys_to_virt(*R_DMA_CH8_SUB2_EP);
9832 -       tmp_ep = first_ep;
9833 -
9834 -       /* What this does is simply to walk the list of interrupt
9835 -          ep descriptors and enable those that are disabled. */
9836 -
9837 -       do {
9838 -               if (IO_EXTRACT(USB_EP_command, epid, tmp_ep->command) == epid &&
9839 -                   !(tmp_ep->command & IO_MASK(USB_EP_command, enable))) {
9840 -                       tmp_ep->command |= IO_STATE(USB_EP_command, enable, yes);
9841 -               }
9842 -
9843 -               tmp_ep = (USB_EP_Desc_t *)phys_to_virt(tmp_ep->next);
9844 -
9845 -       } while (tmp_ep != first_ep);
9846 -
9847 -
9848 -       DBFEXIT;
9849 -}
9850 -
9851 -static int etrax_rh_unlink_urb (struct urb *urb)
9852 -{
9853 -       etrax_hc_t *hc;
9854 -
9855 -       DBFENTER;
9856 -
9857 -       hc = urb->dev->bus->hcpriv;
9858 -
9859 -       if (hc->rh.urb == urb) {
9860 -               hc->rh.send = 0;
9861 -               del_timer(&hc->rh.rh_int_timer);
9862 -       }
9863 -
9864 -       DBFEXIT;
9865 -       return 0;
9866 -}
9867 -
9868 -static void etrax_rh_send_irq(struct urb *urb)
9869 -{
9870 -       __u16 data = 0;
9871 -       etrax_hc_t *hc = urb->dev->bus->hcpriv;
9872 -       DBFENTER;
9873 -
9874 -/*
9875 -  dbg_rh("R_USB_FM_NUMBER   : 0x%08X", *R_USB_FM_NUMBER);
9876 -  dbg_rh("R_USB_FM_REMAINING: 0x%08X", *R_USB_FM_REMAINING);
9877 -*/
9878 -
9879 -       data |= (hc->rh.wPortChange_1) ? (1 << 1) : 0;
9880 -       data |= (hc->rh.wPortChange_2) ? (1 << 2) : 0;
9881 -
9882 -       *((__u16 *)urb->transfer_buffer) = cpu_to_le16(data);
9883 -       /* FIXME: Why is actual_length set to 1 when data is 2 bytes?
9884 -          Since only 1 byte is used, why not declare data as __u8? */
9885 -       urb->actual_length = 1;
9886 -       urb->status = 0;
9887 -
9888 -       if (hc->rh.send && urb->complete) {
9889 -               dbg_rh("wPortChange_1: 0x%04X", hc->rh.wPortChange_1);
9890 -               dbg_rh("wPortChange_2: 0x%04X", hc->rh.wPortChange_2);
9891 -
9892 -               urb->complete(urb, NULL);
9893 -       }
9894 -
9895 -       DBFEXIT;
9896 -}
9897 -
9898 -static void etrax_rh_init_int_timer(struct urb *urb)
9899 -{
9900 -       etrax_hc_t *hc;
9901 -
9902 -       DBFENTER;
9903 -
9904 -       hc = urb->dev->bus->hcpriv;
9905 -       hc->rh.interval = urb->interval;
9906 -       init_timer(&hc->rh.rh_int_timer);
9907 -       hc->rh.rh_int_timer.function = etrax_rh_int_timer_do;
9908 -       hc->rh.rh_int_timer.data = (unsigned long)urb;
9909 -       /* FIXME: Is the jiffies resolution enough? All intervals < 10 ms will be mapped
9910 -          to 0, and the rest to the nearest lower 10 ms. */
9911 -       hc->rh.rh_int_timer.expires = jiffies + ((HZ * hc->rh.interval) / 1000);
9912 -       add_timer(&hc->rh.rh_int_timer);
9913 -
9914 -       DBFEXIT;
9915 -}
9916 -
9917 -static void etrax_rh_int_timer_do(unsigned long ptr)
9918 -{
9919 -       struct urb *urb;
9920 -       etrax_hc_t *hc;
9921 -
9922 -       DBFENTER;
9923 -
9924 -       urb = (struct urb*)ptr;
9925 -       hc = urb->dev->bus->hcpriv;
9926 -
9927 -       if (hc->rh.send) {
9928 -               etrax_rh_send_irq(urb);
9929 -       }
9930 -
9931 -       DBFEXIT;
9932 -}
9933 -
9934 -static int etrax_usb_setup_epid(struct urb *urb)
9935 -{
9936 -       int epid;
9937 -       char devnum, endpoint, out_traffic, slow;
9938 -       int maxlen;
9939 -       unsigned long flags;
9940 -
9941 -       DBFENTER;
9942 -
9943 -       epid = etrax_usb_lookup_epid(urb);
9944 -       if ((epid != -1)){
9945 -               /* An epid that fits this urb has been found. */
9946 -               DBFEXIT;
9947 -               return epid;
9948 -       }
9949 -
9950 -       /* We must find and initiate a new epid for this urb. */
9951 -       epid = etrax_usb_allocate_epid();
9952 -
9953 -       if (epid == -1) {
9954 -               /* Failed to allocate a new epid. */
9955 -               DBFEXIT;
9956 -               return epid;
9957 -       }
9958 -
9959 -       /* We now have a new epid to use. Initiate it. */
9960 -       set_bit(epid, (void *)&epid_usage_bitmask);
9961 -
9962 -       devnum = usb_pipedevice(urb->pipe);
9963 -       endpoint = usb_pipeendpoint(urb->pipe);
9964 -       slow = usb_pipeslow(urb->pipe);
9965 -       maxlen = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
9966 -       if (usb_pipetype(urb->pipe) == PIPE_CONTROL) {
9967 -               /* We want both IN and OUT control traffic to be put on the same EP/SB list. */
9968 -               out_traffic = 1;
9969 -       } else {
9970 -               out_traffic = usb_pipeout(urb->pipe);
9971 -       }
9972 -
9973 -       save_flags(flags);
9974 -       cli();
9975 -
9976 -       *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
9977 -       nop();
9978 -
9979 -       if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
9980 -               *R_USB_EPT_DATA_ISO = IO_STATE(R_USB_EPT_DATA_ISO, valid, yes) |
9981 -                       /* FIXME: Change any to the actual port? */
9982 -                       IO_STATE(R_USB_EPT_DATA_ISO, port, any) |
9983 -                       IO_FIELD(R_USB_EPT_DATA_ISO, max_len, maxlen) |
9984 -                       IO_FIELD(R_USB_EPT_DATA_ISO, ep, endpoint) |
9985 -                       IO_FIELD(R_USB_EPT_DATA_ISO, dev, devnum);
9986 -       } else {
9987 -               *R_USB_EPT_DATA = IO_STATE(R_USB_EPT_DATA, valid, yes) |
9988 -                       IO_FIELD(R_USB_EPT_DATA, low_speed, slow) |
9989 -                       /* FIXME: Change any to the actual port? */
9990 -                       IO_STATE(R_USB_EPT_DATA, port, any) |
9991 -                       IO_FIELD(R_USB_EPT_DATA, max_len, maxlen) |
9992 -                       IO_FIELD(R_USB_EPT_DATA, ep, endpoint) |
9993 -                       IO_FIELD(R_USB_EPT_DATA, dev, devnum);
9994 -       }
9995 -
9996 -       restore_flags(flags);
9997 -
9998 -       if (out_traffic) {
9999 -               set_bit(epid, (void *)&epid_out_traffic);
10000 -       } else {
10001 -               clear_bit(epid, (void *)&epid_out_traffic);
10002 -       }
10003 -
10004 -       dbg_epid("Setting up epid %d with devnum %d, endpoint %d and max_len %d (%s)",
10005 -                epid, devnum, endpoint, maxlen, out_traffic ? "OUT" : "IN");
10006 -
10007 -       DBFEXIT;
10008 -       return epid;
10009 -}
10010 -
10011 -static void etrax_usb_free_epid(int epid)
10012 -{
10013 -       unsigned long flags;
10014 -
10015 -       DBFENTER;
10016 -
10017 -       if (!test_bit(epid, (void *)&epid_usage_bitmask)) {
10018 -               warn("Trying to free unused epid %d", epid);
10019 -               DBFEXIT;
10020 -               return;
10021 -       }
10022 -
10023 -       save_flags(flags);
10024 -       cli();
10025 -
10026 -       *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
10027 -       nop();
10028 -       while (*R_USB_EPT_DATA & IO_MASK(R_USB_EPT_DATA, hold));
10029 -       /* This will, among other things, set the valid field to 0. */
10030 -       *R_USB_EPT_DATA = 0;
10031 -       restore_flags(flags);
10032 -
10033 -       clear_bit(epid, (void *)&epid_usage_bitmask);
10034 -
10035 -
10036 -       dbg_epid("Freed epid %d", epid);
10037 -
10038 -       DBFEXIT;
10039 -}
10040 -
10041 -static int etrax_usb_lookup_epid(struct urb *urb)
10042 -{
10043 -       int i;
10044 -       __u32 data;
10045 -       char devnum, endpoint, slow, out_traffic;
10046 -       int maxlen;
10047 -       unsigned long flags;
10048 -
10049 -       DBFENTER;
10050 -
10051 -       devnum = usb_pipedevice(urb->pipe);
10052 -       endpoint = usb_pipeendpoint(urb->pipe);
10053 -       slow = usb_pipeslow(urb->pipe);
10054 -       maxlen = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
10055 -       if (usb_pipetype(urb->pipe) == PIPE_CONTROL) {
10056 -               /* We want both IN and OUT control traffic to be put on the same EP/SB list. */
10057 -               out_traffic = 1;
10058 -       } else {
10059 -               out_traffic = usb_pipeout(urb->pipe);
10060 -       }
10061 -
10062 -       /* Step through att epids. */
10063 -       for (i = 0; i < NBR_OF_EPIDS; i++) {
10064 -               if (test_bit(i, (void *)&epid_usage_bitmask) &&
10065 -                   test_bit(i, (void *)&epid_out_traffic) == out_traffic) {
10066 -
10067 -                       save_flags(flags);
10068 -                       cli();
10069 -                       *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, i);
10070 -                       nop();
10071 -
10072 -                       if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
10073 -                               data = *R_USB_EPT_DATA_ISO;
10074 -                               restore_flags(flags);
10075 -
10076 -                               if ((IO_MASK(R_USB_EPT_DATA_ISO, valid) & data) &&
10077 -                                   (IO_EXTRACT(R_USB_EPT_DATA_ISO, dev, data) == devnum) &&
10078 -                                   (IO_EXTRACT(R_USB_EPT_DATA_ISO, ep, data) == endpoint) &&
10079 -                                   (IO_EXTRACT(R_USB_EPT_DATA_ISO, max_len, data) == maxlen)) {
10080 -                                       dbg_epid("Found epid %d for devnum %d, endpoint %d (%s)",
10081 -                                                i, devnum, endpoint, out_traffic ? "OUT" : "IN");
10082 -                                       DBFEXIT;
10083 -                                       return i;
10084 -                               }
10085 -                       } else {
10086 -                               data = *R_USB_EPT_DATA;
10087 -                               restore_flags(flags);
10088 -
10089 -                               if ((IO_MASK(R_USB_EPT_DATA, valid) & data) &&
10090 -                                   (IO_EXTRACT(R_USB_EPT_DATA, dev, data) == devnum) &&
10091 -                                   (IO_EXTRACT(R_USB_EPT_DATA, ep, data) == endpoint) &&
10092 -                                   (IO_EXTRACT(R_USB_EPT_DATA, low_speed, data) == slow) &&
10093 -                                   (IO_EXTRACT(R_USB_EPT_DATA, max_len, data) == maxlen)) {
10094 -                                       dbg_epid("Found epid %d for devnum %d, endpoint %d (%s)",
10095 -                                                i, devnum, endpoint, out_traffic ? "OUT" : "IN");
10096 -                                       DBFEXIT;
10097 -                                       return i;
10098 -                               }
10099 -                       }
10100 -               }
10101 -       }
10102 -
10103 -       DBFEXIT;
10104 -       return -1;
10105 -}
10106 -
10107 -static int etrax_usb_allocate_epid(void)
10108 -{
10109 -       int i;
10110 -
10111 -       DBFENTER;
10112 -
10113 -       for (i = 0; i < NBR_OF_EPIDS; i++) {
10114 -               if (!test_bit(i, (void *)&epid_usage_bitmask)) {
10115 -                       dbg_epid("Found free epid %d", i);
10116 -                       DBFEXIT;
10117 -                       return i;
10118 -               }
10119 -       }
10120 -
10121 -       dbg_epid("Found no free epids");
10122 -       DBFEXIT;
10123 -       return -1;
10124 -}
10125 -
10126 -static int etrax_usb_submit_urb(struct urb *urb, unsigned mem_flags)
10127 -{
10128 -       etrax_hc_t *hc;
10129 -       int ret = -EINVAL;
10130 -
10131 -       DBFENTER;
10132 -
10133 -       if (!urb->dev || !urb->dev->bus) {
10134 -               return -ENODEV;
10135 -       }
10136 -       if (usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe)) <= 0) {
10137 -               info("Submit urb to pipe with maxpacketlen 0, pipe 0x%X\n", urb->pipe);
10138 -               return -EMSGSIZE;
10139 -       }
10140 -
10141 -       if (urb->timeout) {
10142 -               /* FIXME. */
10143 -               warn("urb->timeout specified, ignoring.");
10144 -       }
10145 -
10146 -       hc = (etrax_hc_t*)urb->dev->bus->hcpriv;
10147 -
10148 -       if (usb_pipedevice(urb->pipe) == hc->rh.devnum) {
10149 -               /* This request is for the Virtual Root Hub. */
10150 -               ret = etrax_rh_submit_urb(urb);
10151 -
10152 -       } else if (usb_pipetype(urb->pipe) == PIPE_BULK) {
10153 -
10154 -               ret = etrax_usb_submit_bulk_urb(urb);
10155 -
10156 -       } else if (usb_pipetype(urb->pipe) == PIPE_CONTROL) {
10157 -
10158 -               ret = etrax_usb_submit_ctrl_urb(urb);
10159 -
10160 -       } else if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT) {
10161 -               int bustime;
10162 -
10163 -               if (urb->bandwidth == 0) {
10164 -                       bustime = usb_check_bandwidth(urb->dev, urb);
10165 -                       if (bustime < 0) {
10166 -                               ret = bustime;
10167 -                       } else {
10168 -                               ret = etrax_usb_submit_intr_urb(urb);
10169 -                               if (ret == 0)
10170 -                                       usb_claim_bandwidth(urb->dev, urb, bustime, 0);
10171 -                       }
10172 -               } else {
10173 -                       /* Bandwidth already set. */
10174 -                       ret = etrax_usb_submit_intr_urb(urb);
10175 -               }
10176 -
10177 -       } else if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
10178 -               int bustime;
10179 -
10180 -               if (urb->bandwidth == 0) {
10181 -                       bustime = usb_check_bandwidth(urb->dev, urb);
10182 -                       if (bustime < 0) {
10183 -                               ret = bustime;
10184 -                       } else {
10185 -                               ret = etrax_usb_submit_isoc_urb(urb);
10186 -                               if (ret == 0)
10187 -                                       usb_claim_bandwidth(urb->dev, urb, bustime, 0);
10188 -                       }
10189 -               } else {
10190 -                       /* Bandwidth already set. */
10191 -                       ret = etrax_usb_submit_isoc_urb(urb);
10192 -               }
10193 -       }
10194 -
10195 -       DBFEXIT;
10196 -
10197 -        if (ret != 0)
10198 -          printk("Submit URB error %d\n", ret);
10199 -
10200 -       return ret;
10201 -}
10202 -
10203 -static int etrax_usb_unlink_urb(struct urb *urb, int status)
10204 -{
10205 -       etrax_hc_t *hc;
10206 -       etrax_urb_priv_t *urb_priv;
10207 -       int epid;
10208 -       unsigned int flags;
10209 -
10210 -       DBFENTER;
10211 -
10212 -       if (!urb) {
10213 -               return -EINVAL;
10214 -       }
10215 -
10216 -       /* Disable interrupts here since a descriptor interrupt for the isoc epid
10217 -          will modify the sb list.  This could possibly be done more granular, but
10218 -          unlink_urb should not be used frequently anyway.
10219 -       */
10220 -
10221 -       save_flags(flags);
10222 -       cli();
10223 -
10224 -       if (!urb->dev || !urb->dev->bus) {
10225 -               restore_flags(flags);
10226 -               return -ENODEV;
10227 -       }
10228 -       if (!urb->hcpriv) {
10229 -               /* This happens if a device driver calls unlink on an urb that
10230 -                  was never submitted (lazy driver) or if the urb was completed
10231 -                  while unlink was being called. */
10232 -               restore_flags(flags);
10233 -               return 0;
10234 -       }
10235 -       if (urb->transfer_flags & URB_ASYNC_UNLINK) {
10236 -               /* FIXME. */
10237 -               /* If URB_ASYNC_UNLINK is set:
10238 -                  unlink
10239 -                  move to a separate urb list
10240 -                  call complete at next sof with ECONNRESET
10241 -
10242 -                  If not:
10243 -                  wait 1 ms
10244 -                  unlink
10245 -                  call complete with ENOENT
10246 -               */
10247 -               warn("URB_ASYNC_UNLINK set, ignoring.");
10248 -       }
10249 -
10250 -       /* One might think that urb->status = -EINPROGRESS would be a requirement for unlinking,
10251 -          but that doesn't work for interrupt and isochronous traffic since they are completed
10252 -          repeatedly, and urb->status is set then. That may in itself be a bug though. */
10253 -
10254 -       hc = urb->dev->bus->hcpriv;
10255 -       urb_priv = (etrax_urb_priv_t *)urb->hcpriv;
10256 -       epid = urb_priv->epid;
10257 -
10258 -       /* Set the urb status (synchronous unlink). */
10259 -       urb->status = -ENOENT;
10260 -       urb_priv->urb_state = UNLINK;
10261 -
10262 -       if (usb_pipedevice(urb->pipe) == hc->rh.devnum) {
10263 -               int ret;
10264 -               ret = etrax_rh_unlink_urb(urb);
10265 -               DBFEXIT;
10266 -               restore_flags(flags);
10267 -               return ret;
10268 -
10269 -       } else if (usb_pipetype(urb->pipe) == PIPE_BULK) {
10270 -
10271 -               dbg_bulk("Unlink of bulk urb (0x%lx)", (unsigned long)urb);
10272 -
10273 -               if (TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
10274 -                       /* The EP was enabled, disable it and wait. */
10275 -                       TxBulkEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
10276 -
10277 -                       /* Ah, the luxury of busy-wait. */
10278 -                       while (*R_DMA_CH8_SUB0_EP == virt_to_phys(&TxBulkEPList[epid]));
10279 -               }
10280 -               /* Kicking dummy list out of the party. */
10281 -               TxBulkEPList[epid].next = virt_to_phys(&TxBulkEPList[(epid + 1) % NBR_OF_EPIDS]);
10282 -
10283 -       } else if (usb_pipetype(urb->pipe) == PIPE_CONTROL) {
10284 -
10285 -               dbg_ctrl("Unlink of ctrl urb (0x%lx)", (unsigned long)urb);
10286 -
10287 -               if (TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
10288 -                       /* The EP was enabled, disable it and wait. */
10289 -                       TxCtrlEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
10290 -
10291 -                       /* Ah, the luxury of busy-wait. */
10292 -                       while (*R_DMA_CH8_SUB1_EP == virt_to_phys(&TxCtrlEPList[epid]));
10293 -               }
10294 -
10295 -       } else if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT) {
10296 -
10297 -               dbg_intr("Unlink of intr urb (0x%lx)", (unsigned long)urb);
10298 -
10299 -               /* Separate function because it's a tad more complicated. */
10300 -               etrax_usb_unlink_intr_urb(urb);
10301 -
10302 -       } else if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
10303 -
10304 -               dbg_isoc("Unlink of isoc urb (0x%lx)", (unsigned long)urb);
10305 -
10306 -               if (TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
10307 -                       /* The EP was enabled, disable it and wait. */
10308 -                       TxIsocEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
10309 -
10310 -                       /* Ah, the luxury of busy-wait. */
10311 -                       while (*R_DMA_CH8_SUB3_EP == virt_to_phys(&TxIsocEPList[epid]));
10312 -               }
10313 -       }
10314 -
10315 -       /* Note that we need to remove the urb from the urb list *before* removing its SB
10316 -          descriptors. (This means that the isoc eof handler might get a null urb when we
10317 -          are unlinking the last urb.) */
10318 -
10319 -       if (usb_pipetype(urb->pipe) == PIPE_BULK) {
10320 -
10321 -               urb_list_del(urb, epid);
10322 -               TxBulkEPList[epid].sub = 0;
10323 -               etrax_remove_from_sb_list(urb);
10324 -
10325 -       } else if (usb_pipetype(urb->pipe) == PIPE_CONTROL) {
10326 -
10327 -               urb_list_del(urb, epid);
10328 -               TxCtrlEPList[epid].sub = 0;
10329 -               etrax_remove_from_sb_list(urb);
10330 -
10331 -       } else if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT) {
10332 -
10333 -               urb_list_del(urb, epid);
10334 -               /* Sanity check (should never happen). */
10335 -               assert(urb_list_empty(epid));
10336 -
10337 -               /* Release allocated bandwidth. */
10338 -               usb_release_bandwidth(urb->dev, urb, 0);
10339 -
10340 -       } else if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
10341 -
10342 -               if (usb_pipeout(urb->pipe)) {
10343 -
10344 -                       USB_SB_Desc_t *iter_sb, *prev_sb, *next_sb;
10345 -
10346 -                       if (__urb_list_entry(urb, epid)) {
10347 -
10348 -                               urb_list_del(urb, epid);
10349 -                               iter_sb = TxIsocEPList[epid].sub ? phys_to_virt(TxIsocEPList[epid].sub) : 0;
10350 -                               prev_sb = 0;
10351 -                               while (iter_sb && (iter_sb != urb_priv->first_sb)) {
10352 -                                       prev_sb = iter_sb;
10353 -                                       iter_sb = iter_sb->next ? phys_to_virt(iter_sb->next) : 0;
10354 -                               }
10355 -
10356 -                               if (iter_sb == 0) {
10357 -                                       /* Unlink of the URB currently being transmitted. */
10358 -                                       prev_sb = 0;
10359 -                                       iter_sb = TxIsocEPList[epid].sub ? phys_to_virt(TxIsocEPList[epid].sub) : 0;
10360 -                               }
10361 -
10362 -                               while (iter_sb && (iter_sb != urb_priv->last_sb)) {
10363 -                                       iter_sb = iter_sb->next ? phys_to_virt(iter_sb->next) : 0;
10364 -                               }
10365 -                               if (iter_sb) {
10366 -                                       next_sb = iter_sb->next ? phys_to_virt(iter_sb->next) : 0;
10367 -                               } else {
10368 -                                       /* This should only happen if the DMA has completed
10369 -                                          processing the SB list for this EP while interrupts
10370 -                                          are disabled. */
10371 -                                       dbg_isoc("Isoc urb not found, already sent?");
10372 -                                       next_sb = 0;
10373 -                               }
10374 -                               if (prev_sb) {
10375 -                                       prev_sb->next = next_sb ? virt_to_phys(next_sb) : 0;
10376 -                               } else {
10377 -                                       TxIsocEPList[epid].sub = next_sb ? virt_to_phys(next_sb) : 0;
10378 -                               }
10379 -
10380 -                               etrax_remove_from_sb_list(urb);
10381 -                               if (urb_list_empty(epid)) {
10382 -                                       TxIsocEPList[epid].sub = 0;
10383 -                                       dbg_isoc("Last isoc out urb epid %d", epid);
10384 -                               } else if (next_sb || prev_sb) {
10385 -                                       dbg_isoc("Re-enable isoc out epid %d", epid);
10386 -
10387 -                                       TxIsocEPList[epid].hw_len = 0;
10388 -                                       TxIsocEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes);
10389 -                               } else {
10390 -                                       TxIsocEPList[epid].sub = 0;
10391 -                                       dbg_isoc("URB list non-empty and no SB list, EP disabled");
10392 -                               }
10393 -                       } else {
10394 -                               dbg_isoc("Urb 0x%p not found, completed already?", urb);
10395 -                       }
10396 -               } else {
10397 -
10398 -                       urb_list_del(urb, epid);
10399 -
10400 -                       /* For in traffic there is only one SB descriptor for each EP even
10401 -                          though there may be several urbs (all urbs point at the same SB). */
10402 -                       if (urb_list_empty(epid)) {
10403 -                               /* No more urbs, remove the SB. */
10404 -                               TxIsocEPList[epid].sub = 0;
10405 -                               etrax_remove_from_sb_list(urb);
10406 -                       } else {
10407 -                               TxIsocEPList[epid].hw_len = 0;
10408 -                               TxIsocEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes);
10409 -                       }
10410 -               }
10411 -               /* Release allocated bandwidth. */
10412 -               usb_release_bandwidth(urb->dev, urb, 1);
10413 -       }
10414 -       /* Free the epid if urb list is empty. */
10415 -       if (urb_list_empty(epid)) {
10416 -               etrax_usb_free_epid(epid);
10417 -       }
10418 -       restore_flags(flags);
10419 -
10420 -       /* Must be done before calling completion handler. */
10421 -       kfree(urb_priv);
10422 -       urb->hcpriv = 0;
10423 -
10424 -       if (urb->complete) {
10425 -               urb->complete(urb, NULL);
10426 -       }
10427 -
10428 -       DBFEXIT;
10429 -       return 0;
10430 -}
10431 -
10432 -static int etrax_usb_get_frame_number(struct usb_device *usb_dev)
10433 -{
10434 -       DBFENTER;
10435 -       DBFEXIT;
10436 -       return (*R_USB_FM_NUMBER & 0x7ff);
10437 -}
10438 -
10439 -static irqreturn_t etrax_usb_tx_interrupt(int irq, void *vhc)
10440 -{
10441 -       DBFENTER;
10442 -
10443 -       /* This interrupt handler could be used when unlinking EP descriptors. */
10444 -
10445 -       if (*R_IRQ_READ2 & IO_MASK(R_IRQ_READ2, dma8_sub0_descr)) {
10446 -               USB_EP_Desc_t *ep;
10447 -
10448 -               //dbg_bulk("dma8_sub0_descr (BULK) intr.");
10449 -
10450 -               /* It should be safe clearing the interrupt here, since we don't expect to get a new
10451 -                  one until we restart the bulk channel. */
10452 -               *R_DMA_CH8_SUB0_CLR_INTR = IO_STATE(R_DMA_CH8_SUB0_CLR_INTR, clr_descr, do);
10453 -
10454 -               /* Wait while the DMA is running (though we don't expect it to be). */
10455 -               while (*R_DMA_CH8_SUB0_CMD & IO_MASK(R_DMA_CH8_SUB0_CMD, cmd));
10456 -
10457 -               /* Advance the DMA to the next EP descriptor. */
10458 -               ep = (USB_EP_Desc_t *)phys_to_virt(*R_DMA_CH8_SUB0_EP);
10459 -
10460 -               //dbg_bulk("descr intr: DMA is at 0x%lx", (unsigned long)ep);
10461 -
10462 -               /* ep->next is already a physical address; no need for a virt_to_phys. */
10463 -               *R_DMA_CH8_SUB0_EP = ep->next;
10464 -
10465 -               /* Start the DMA bulk channel again. */
10466 -               *R_DMA_CH8_SUB0_CMD = IO_STATE(R_DMA_CH8_SUB0_CMD, cmd, start);
10467 -       }
10468 -       if (*R_IRQ_READ2 & IO_MASK(R_IRQ_READ2, dma8_sub1_descr)) {
10469 -               struct urb *urb;
10470 -               int epid;
10471 -               etrax_urb_priv_t *urb_priv;
10472 -               unsigned long int flags;
10473 -
10474 -               dbg_ctrl("dma8_sub1_descr (CTRL) intr.");
10475 -               *R_DMA_CH8_SUB1_CLR_INTR = IO_STATE(R_DMA_CH8_SUB1_CLR_INTR, clr_descr, do);
10476 -
10477 -               /* The complete callback gets called so we cli. */
10478 -               save_flags(flags);
10479 -               cli();
10480 -
10481 -               for (epid = 0; epid < NBR_OF_EPIDS - 1; epid++) {
10482 -                       if ((TxCtrlEPList[epid].sub == 0) ||
10483 -                           (epid == DUMMY_EPID) ||
10484 -                           (epid == INVALID_EPID)) {
10485 -                               /* Nothing here to see. */
10486 -                               continue;
10487 -                       }
10488 -
10489 -                       /* Get the first urb (if any). */
10490 -                       urb = urb_list_first(epid);
10491 -
10492 -                       if (urb) {
10493 -
10494 -                               /* Sanity check. */
10495 -                               assert(usb_pipetype(urb->pipe) == PIPE_CONTROL);
10496 -
10497 -                               urb_priv = (etrax_urb_priv_t *)urb->hcpriv;
10498 -                               assert(urb_priv);
10499 -
10500 -                               if (urb_priv->urb_state == WAITING_FOR_DESCR_INTR) {
10501 -                                       assert(!(TxCtrlEPList[urb_priv->epid].command & IO_MASK(USB_EP_command, enable)));
10502 -
10503 -                                       etrax_usb_complete_urb(urb, 0);
10504 -                               }
10505 -                       }
10506 -               }
10507 -               restore_flags(flags);
10508 -       }
10509 -       if (*R_IRQ_READ2 & IO_MASK(R_IRQ_READ2, dma8_sub2_descr)) {
10510 -               dbg_intr("dma8_sub2_descr (INTR) intr.");
10511 -               *R_DMA_CH8_SUB2_CLR_INTR = IO_STATE(R_DMA_CH8_SUB2_CLR_INTR, clr_descr, do);
10512 -       }
10513 -       if (*R_IRQ_READ2 & IO_MASK(R_IRQ_READ2, dma8_sub3_descr)) {
10514 -               struct urb *urb;
10515 -               int epid;
10516 -               int epid_done;
10517 -               etrax_urb_priv_t *urb_priv;
10518 -               USB_SB_Desc_t *sb_desc;
10519 -
10520 -               usb_isoc_complete_data_t *comp_data = NULL;
10521 -
10522 -               /* One or more isoc out transfers are done. */
10523 -               dbg_isoc("dma8_sub3_descr (ISOC) intr.");
10524 -
10525 -               /* For each isoc out EP search for the first sb_desc with the intr flag
10526 -                  set.  This descriptor must be the last packet from an URB.  Then
10527 -                  traverse the URB list for the EP until the URB with urb_priv->last_sb
10528 -                  matching the intr-marked sb_desc is found.  All URBs before this have
10529 -                  been sent.
10530 -               */
10531 -
10532 -               for (epid = 0; epid < NBR_OF_EPIDS - 1; epid++) {
10533 -                       /* Skip past epids with no SB lists, epids used for in traffic,
10534 -                          and special (dummy, invalid) epids. */
10535 -                       if ((TxIsocEPList[epid].sub == 0) ||
10536 -                           (test_bit(epid, (void *)&epid_out_traffic) == 0) ||
10537 -                           (epid == DUMMY_EPID) ||
10538 -                           (epid == INVALID_EPID)) {
10539 -                               /* Nothing here to see. */
10540 -                               continue;
10541 -                       }
10542 -                       sb_desc = phys_to_virt(TxIsocEPList[epid].sub);
10543 -
10544 -                       /* Find the last descriptor of the currently active URB for this ep.
10545 -                          This is the first descriptor in the sub list marked for a descriptor
10546 -                          interrupt. */
10547 -                       while (sb_desc && !IO_EXTRACT(USB_SB_command, intr, sb_desc->command)) {
10548 -                               sb_desc = sb_desc->next ? phys_to_virt(sb_desc->next) : 0;
10549 -                       }
10550 -                       assert(sb_desc);
10551 -
10552 -                       dbg_isoc("Check epid %d, sub 0x%p, SB 0x%p",
10553 -                                epid,
10554 -                                phys_to_virt(TxIsocEPList[epid].sub),
10555 -                                sb_desc);
10556 -
10557 -                       epid_done = 0;
10558 -
10559 -                       /* Get the first urb (if any). */
10560 -                       urb = urb_list_first(epid);
10561 -                       assert(urb);
10562 -
10563 -                       while (urb && !epid_done) {
10564 -
10565 -                               /* Sanity check. */
10566 -                               assert(usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS);
10567 -
10568 -                               if (!usb_pipeout(urb->pipe)) {
10569 -                                       /* descr interrupts are generated only for out pipes. */
10570 -                                       epid_done = 1;
10571 -                                       continue;
10572 -                               }
10573 -
10574 -                               urb_priv = (etrax_urb_priv_t *)urb->hcpriv;
10575 -                               assert(urb_priv);
10576 -
10577 -                               if (sb_desc != urb_priv->last_sb) {
10578 -
10579 -                                       /* This urb has been sent. */
10580 -                                       dbg_isoc("out URB 0x%p sent", urb);
10581 -
10582 -                                       urb_priv->urb_state = TRANSFER_DONE;
10583 -
10584 -                               } else if ((sb_desc == urb_priv->last_sb) &&
10585 -                                          !(TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable))) {
10586 -
10587 -                                       assert((sb_desc->command & IO_MASK(USB_SB_command, eol)) == IO_STATE(USB_SB_command, eol, yes));
10588 -                                       assert(sb_desc->next == 0);
10589 -
10590 -                                       dbg_isoc("out URB 0x%p last in list, epid disabled", urb);
10591 -                                       TxIsocEPList[epid].sub = 0;
10592 -                                       TxIsocEPList[epid].hw_len = 0;
10593 -                                       urb_priv->urb_state = TRANSFER_DONE;
10594 -
10595 -                                       epid_done = 1;
10596 -
10597 -                               } else {
10598 -                                       epid_done = 1;
10599 -                               }
10600 -                               if (!epid_done) {
10601 -                                       urb = urb_list_next(urb, epid);
10602 -                               }
10603 -                       }
10604 -
10605 -               }
10606 -
10607 -               *R_DMA_CH8_SUB3_CLR_INTR = IO_STATE(R_DMA_CH8_SUB3_CLR_INTR, clr_descr, do);
10608 -
10609 -               comp_data = (usb_isoc_complete_data_t*)kmem_cache_alloc(isoc_compl_cache, SLAB_ATOMIC);
10610 -               assert(comp_data != NULL);
10611 -
10612 -                INIT_WORK(&comp_data->usb_bh, etrax_usb_isoc_descr_interrupt_bottom_half, comp_data);
10613 -                schedule_work(&comp_data->usb_bh);
10614 -       }
10615 -
10616 -       DBFEXIT;
10617 -        return IRQ_HANDLED;
10618 -}
10619 -
10620 -static void etrax_usb_isoc_descr_interrupt_bottom_half(void *data)
10621 -{
10622 -       usb_isoc_complete_data_t *comp_data = (usb_isoc_complete_data_t*)data;
10623 -
10624 -       struct urb *urb;
10625 -       int epid;
10626 -       int epid_done;
10627 -       etrax_urb_priv_t *urb_priv;
10628 -
10629 -       DBFENTER;
10630 -
10631 -       dbg_isoc("dma8_sub3_descr (ISOC) bottom half.");
10632 -
10633 -       for (epid = 0; epid < NBR_OF_EPIDS - 1; epid++) {
10634 -               unsigned long flags;
10635 -
10636 -               save_flags(flags);
10637 -               cli();
10638 -
10639 -               epid_done = 0;
10640 -
10641 -               /* The descriptor interrupt handler has marked all transmitted isoch. out
10642 -                  URBs with TRANSFER_DONE.  Now we traverse all epids and for all that
10643 -                  have isoch. out traffic traverse its URB list and complete the
10644 -                  transmitted URB.
10645 -               */
10646 -
10647 -               while (!epid_done) {
10648 -
10649 -                       /* Get the first urb (if any). */
10650 -                       urb = urb_list_first(epid);
10651 -                       if (urb == 0) {
10652 -                               epid_done = 1;
10653 -                               continue;
10654 -                       }
10655 -
10656 -                       if (usb_pipetype(urb->pipe) != PIPE_ISOCHRONOUS) {
10657 -                                       epid_done = 1;
10658 -                                       continue;
10659 -                       }
10660 -
10661 -                       if (!usb_pipeout(urb->pipe)) {
10662 -                               /* descr interrupts are generated only for out pipes. */
10663 -                               epid_done = 1;
10664 -                               continue;
10665 -                       }
10666 -
10667 -                       dbg_isoc("Check epid %d, SB 0x%p", epid, (char*)TxIsocEPList[epid].sub);
10668 -
10669 -                       urb_priv = (etrax_urb_priv_t *)urb->hcpriv;
10670 -                       assert(urb_priv);
10671 -
10672 -                       if (urb_priv->urb_state == TRANSFER_DONE) {
10673 -                               int i;
10674 -                               struct usb_iso_packet_descriptor *packet;
10675 -
10676 -                               /* This urb has been sent. */
10677 -                               dbg_isoc("Completing isoc out URB 0x%p", urb);
10678 -
10679 -                               for (i = 0; i < urb->number_of_packets; i++) {
10680 -                                       packet = &urb->iso_frame_desc[i];
10681 -                                       packet->status = 0;
10682 -                                       packet->actual_length = packet->length;
10683 -                               }
10684 -
10685 -                               etrax_usb_complete_isoc_urb(urb, 0);
10686 -
10687 -                               if (urb_list_empty(epid)) {
10688 -                                       etrax_usb_free_epid(epid);
10689 -                                       epid_done = 1;
10690 -                               }
10691 -                       } else {
10692 -                               epid_done = 1;
10693 -                       }
10694 -               }
10695 -               restore_flags(flags);
10696 -
10697 -       }
10698 -       kmem_cache_free(isoc_compl_cache, comp_data);
10699 -
10700 -       DBFEXIT;
10701 -}
10702 -
10703 -
10704 -
10705 -static irqreturn_t etrax_usb_rx_interrupt(int irq, void *vhc)
10706 -{
10707 -       struct urb *urb;
10708 -       etrax_urb_priv_t *urb_priv;
10709 -       int epid = 0;
10710 -       unsigned long flags;
10711 -
10712 -       /* Isoc diagnostics. */
10713 -       static int curr_fm = 0;
10714 -       static int prev_fm = 0;
10715 -
10716 -       DBFENTER;
10717 -
10718 -       /* Clear this interrupt. */
10719 -       *R_DMA_CH9_CLR_INTR = IO_STATE(R_DMA_CH9_CLR_INTR, clr_eop, do);
10720 -
10721 -       /* Note that this while loop assumes that all packets span only
10722 -          one rx descriptor. */
10723 -
10724 -       /* The reason we cli here is that we call the driver's callback functions. */
10725 -       save_flags(flags);
10726 -       cli();
10727 -
10728 -       while (myNextRxDesc->status & IO_MASK(USB_IN_status, eop)) {
10729 -
10730 -               epid = IO_EXTRACT(USB_IN_status, epid, myNextRxDesc->status);
10731 -               urb = urb_list_first(epid);
10732 -
10733 -               //printk("eop for epid %d, first urb 0x%lx\n", epid, (unsigned long)urb);
10734 -
10735 -               if (!urb) {
10736 -                       err("No urb for epid %d in rx interrupt", epid);
10737 -                       __dump_ept_data(epid);
10738 -                       goto skip_out;
10739 -               }
10740 -
10741 -               /* Note that we cannot indescriminately assert(usb_pipein(urb->pipe)) since
10742 -                  ctrl pipes are not. */
10743 -
10744 -               if (myNextRxDesc->status & IO_MASK(USB_IN_status, error)) {
10745 -                       __u32 r_usb_ept_data;
10746 -                       int no_error = 0;
10747 -
10748 -                       assert(test_bit(epid, (void *)&epid_usage_bitmask));
10749 -
10750 -                       *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
10751 -                       nop();
10752 -                       if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
10753 -                               r_usb_ept_data = *R_USB_EPT_DATA_ISO;
10754 -
10755 -                               if ((r_usb_ept_data & IO_MASK(R_USB_EPT_DATA_ISO, valid)) &&
10756 -                                   (IO_EXTRACT(R_USB_EPT_DATA_ISO, error_code, r_usb_ept_data) == 0) &&
10757 -                                   (myNextRxDesc->status & IO_MASK(USB_IN_status, nodata))) {
10758 -                                       /* Not an error, just a failure to receive an expected iso
10759 -                                          in packet in this frame.  This is not documented
10760 -                                          in the designers reference.
10761 -                                       */
10762 -                                       no_error++;
10763 -                               } else {
10764 -                                       warn("R_USB_EPT_DATA_ISO for epid %d = 0x%x", epid, r_usb_ept_data);
10765 -                               }
10766 -                       } else {
10767 -                               r_usb_ept_data = *R_USB_EPT_DATA;
10768 -                               warn("R_USB_EPT_DATA for epid %d = 0x%x", epid, r_usb_ept_data);
10769 -                       }
10770 -
10771 -                       if (!no_error){
10772 -                               warn("error in rx desc->status, epid %d, first urb = 0x%lx",
10773 -                                    epid, (unsigned long)urb);
10774 -                               __dump_in_desc(myNextRxDesc);
10775 -
10776 -                               warn("R_USB_STATUS = 0x%x", *R_USB_STATUS);
10777 -
10778 -                               /* Check that ept was disabled when error occurred. */
10779 -                               switch (usb_pipetype(urb->pipe)) {
10780 -                               case PIPE_BULK:
10781 -                                       assert(!(TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)));
10782 -                                       break;
10783 -                               case PIPE_CONTROL:
10784 -                                       assert(!(TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)));
10785 -                                       break;
10786 -                               case PIPE_INTERRUPT:
10787 -                                       assert(!(TxIntrEPList[epid].command & IO_MASK(USB_EP_command, enable)));
10788 -                                       break;
10789 -                               case PIPE_ISOCHRONOUS:
10790 -                                       assert(!(TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable)));
10791 -                                       break;
10792 -                               default:
10793 -                                       warn("etrax_usb_rx_interrupt: bad pipetype %d in urb 0x%p",
10794 -                                            usb_pipetype(urb->pipe),
10795 -                                            urb);
10796 -                               }
10797 -                               etrax_usb_complete_urb(urb, -EPROTO);
10798 -                               goto skip_out;
10799 -                       }
10800 -               }
10801 -
10802 -               urb_priv = (etrax_urb_priv_t *)urb->hcpriv;
10803 -               assert(urb_priv);
10804 -
10805 -               if ((usb_pipetype(urb->pipe) == PIPE_BULK) ||
10806 -                   (usb_pipetype(urb->pipe) == PIPE_CONTROL) ||
10807 -                   (usb_pipetype(urb->pipe) == PIPE_INTERRUPT)) {
10808 -
10809 -                       if (myNextRxDesc->status & IO_MASK(USB_IN_status, nodata)) {
10810 -                               /* We get nodata for empty data transactions, and the rx descriptor's
10811 -                                  hw_len field is not valid in that case. No data to copy in other
10812 -                                  words. */
10813 -                       } else {
10814 -                               /* Make sure the data fits in the buffer. */
10815 -                               assert(urb_priv->rx_offset + myNextRxDesc->hw_len
10816 -                                      <= urb->transfer_buffer_length);
10817 -
10818 -                               memcpy(urb->transfer_buffer + urb_priv->rx_offset,
10819 -                                      phys_to_virt(myNextRxDesc->buf), myNextRxDesc->hw_len);
10820 -                               urb_priv->rx_offset += myNextRxDesc->hw_len;
10821 -                       }
10822 -
10823 -                       if (myNextRxDesc->status & IO_MASK(USB_IN_status, eot)) {
10824 -                               if ((usb_pipetype(urb->pipe) == PIPE_CONTROL) &&
10825 -                                   ((TxCtrlEPList[urb_priv->epid].command & IO_MASK(USB_EP_command, enable)) ==
10826 -                                    IO_STATE(USB_EP_command, enable, yes))) {
10827 -                                       /* The EP is still enabled, so the OUT packet used to ack
10828 -                                          the in data is probably not processed yet.  If the EP
10829 -                                          sub pointer has not moved beyond urb_priv->last_sb mark
10830 -                                          it for a descriptor interrupt and complete the urb in
10831 -                                          the descriptor interrupt handler.
10832 -                                       */
10833 -                                       USB_SB_Desc_t *sub = TxCtrlEPList[urb_priv->epid].sub ? phys_to_virt(TxCtrlEPList[urb_priv->epid].sub) : 0;
10834 -
10835 -                                       while ((sub != NULL) && (sub != urb_priv->last_sb)) {
10836 -                                               sub = sub->next ? phys_to_virt(sub->next) : 0;
10837 -                                       }
10838 -                                       if (sub != NULL) {
10839 -                                               /* The urb has not been fully processed. */
10840 -                                               urb_priv->urb_state = WAITING_FOR_DESCR_INTR;
10841 -                                       } else {
10842 -                                               warn("(CTRL) epid enabled and urb (0x%p) processed, ep->sub=0x%p", urb, (char*)TxCtrlEPList[urb_priv->epid].sub);
10843 -                                               etrax_usb_complete_urb(urb, 0);
10844 -                                       }
10845 -                               } else {
10846 -                                       etrax_usb_complete_urb(urb, 0);
10847 -                               }
10848 -                       }
10849 -
10850 -               } else if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
10851 -
10852 -                       struct usb_iso_packet_descriptor *packet;
10853 -
10854 -                       if (urb_priv->urb_state == UNLINK) {
10855 -                               info("Ignoring rx data for urb being unlinked.");
10856 -                               goto skip_out;
10857 -                       } else if (urb_priv->urb_state == NOT_STARTED) {
10858 -                               info("What? Got rx data for urb that isn't started?");
10859 -                               goto skip_out;
10860 -                       }
10861 -
10862 -                       packet = &urb->iso_frame_desc[urb_priv->isoc_packet_counter];
10863 -                       packet->status = 0;
10864 -
10865 -                       if (myNextRxDesc->status & IO_MASK(USB_IN_status, nodata)) {
10866 -                               /* We get nodata for empty data transactions, and the rx descriptor's
10867 -                                  hw_len field is not valid in that case. We copy 0 bytes however to
10868 -                                  stay in synch. */
10869 -                               packet->actual_length = 0;
10870 -                       } else {
10871 -                               packet->actual_length = myNextRxDesc->hw_len;
10872 -                               /* Make sure the data fits in the buffer. */
10873 -                               assert(packet->actual_length <= packet->length);
10874 -                               memcpy(urb->transfer_buffer + packet->offset,
10875 -                                      phys_to_virt(myNextRxDesc->buf), packet->actual_length);
10876 -                       }
10877 -
10878 -                       /* Increment the packet counter. */
10879 -                       urb_priv->isoc_packet_counter++;
10880 -
10881 -                       /* Note that we don't care about the eot field in the rx descriptor's status.
10882 -                          It will always be set for isoc traffic. */
10883 -                       if (urb->number_of_packets == urb_priv->isoc_packet_counter) {
10884 -
10885 -                               /* Out-of-synch diagnostics. */
10886 -                               curr_fm = (*R_USB_FM_NUMBER & 0x7ff);
10887 -                               if (((prev_fm + urb_priv->isoc_packet_counter) % (0x7ff + 1)) != curr_fm) {
10888 -                                       /* This test is wrong, if there is more than one isoc
10889 -                                          in endpoint active it will always calculate wrong
10890 -                                          since prev_fm is shared by all endpoints.
10891 -
10892 -                                          FIXME Make this check per URB using urb->start_frame.
10893 -                                       */
10894 -                                       dbg_isoc("Out of synch? Previous frame = %d, current frame = %d",
10895 -                                                prev_fm, curr_fm);
10896 -
10897 -                               }
10898 -                               prev_fm = curr_fm;
10899 -
10900 -                               /* Complete the urb with status OK. */
10901 -                               etrax_usb_complete_isoc_urb(urb, 0);
10902 -                       }
10903 -               }
10904 -
10905 -       skip_out:
10906 -
10907 -               /* DMA IN cache bug. Flush the DMA IN buffer from the cache. (struct etrax_dma_descr
10908 -                  has the same layout as USB_IN_Desc for the relevant fields.) */
10909 -               prepare_rx_descriptor((struct etrax_dma_descr*)myNextRxDesc);
10910 -
10911 -               myPrevRxDesc = myNextRxDesc;
10912 -               myPrevRxDesc->command |= IO_MASK(USB_IN_command, eol);
10913 -               myLastRxDesc->command &= ~IO_MASK(USB_IN_command, eol);
10914 -               myLastRxDesc = myPrevRxDesc;
10915 -
10916 -               myNextRxDesc->status = 0;
10917 -               myNextRxDesc = phys_to_virt(myNextRxDesc->next);
10918 -       }
10919 -
10920 -       restore_flags(flags);
10921 -
10922 -       DBFEXIT;
10923 -
10924 -        return IRQ_HANDLED;
10925 -}
10926 -
10927 -
10928 -/* This function will unlink the SB descriptors associated with this urb. */
10929 -static int etrax_remove_from_sb_list(struct urb *urb)
10930 -{
10931 -       USB_SB_Desc_t *next_sb, *first_sb, *last_sb;
10932 -       etrax_urb_priv_t *urb_priv;
10933 -       int i = 0;
10934 -
10935 -       DBFENTER;
10936 -
10937 -       urb_priv = (etrax_urb_priv_t *)urb->hcpriv;
10938 -       assert(urb_priv);
10939 -
10940 -       /* Just a sanity check. Since we don't fiddle with the DMA list the EP descriptor
10941 -          doesn't really need to be disabled, it's just that we expect it to be. */
10942 -       if (usb_pipetype(urb->pipe) == PIPE_BULK) {
10943 -               assert(!(TxBulkEPList[urb_priv->epid].command & IO_MASK(USB_EP_command, enable)));
10944 -       } else if (usb_pipetype(urb->pipe) == PIPE_CONTROL) {
10945 -               assert(!(TxCtrlEPList[urb_priv->epid].command & IO_MASK(USB_EP_command, enable)));
10946 -       }
10947 -
10948 -       first_sb = urb_priv->first_sb;
10949 -       last_sb = urb_priv->last_sb;
10950 -
10951 -       assert(first_sb);
10952 -       assert(last_sb);
10953 -
10954 -       while (first_sb != last_sb) {
10955 -               next_sb = (USB_SB_Desc_t *)phys_to_virt(first_sb->next);
10956 -               kmem_cache_free(usb_desc_cache, first_sb);
10957 -               first_sb = next_sb;
10958 -               i++;
10959 -       }
10960 -       kmem_cache_free(usb_desc_cache, last_sb);
10961 -       i++;
10962 -       dbg_sb("%d SB descriptors freed", i);
10963 -       /* Compare i with urb->number_of_packets for Isoc traffic.
10964 -          Should be same when calling unlink_urb */
10965 -
10966 -       DBFEXIT;
10967 -
10968 -       return i;
10969 -}
10970 -
10971 -static int etrax_usb_submit_bulk_urb(struct urb *urb)
10972 -{
10973 -       int epid;
10974 -       int empty;
10975 -       unsigned long flags;
10976 -       etrax_urb_priv_t *urb_priv;
10977 -
10978 -       DBFENTER;
10979 -
10980 -       /* Epid allocation, empty check and list add must be protected.
10981 -          Read about this in etrax_usb_submit_ctrl_urb. */
10982 -
10983 -       spin_lock_irqsave(&urb_list_lock, flags);
10984 -       epid = etrax_usb_setup_epid(urb);
10985 -       if (epid == -1) {
10986 -               DBFEXIT;
10987 -               spin_unlock_irqrestore(&urb_list_lock, flags);
10988 -               return -ENOMEM;
10989 -       }
10990 -       empty = urb_list_empty(epid);
10991 -       urb_list_add(urb, epid);
10992 -       spin_unlock_irqrestore(&urb_list_lock, flags);
10993 -
10994 -       dbg_bulk("Adding bulk %s urb 0x%lx to %s list, epid %d",
10995 -                usb_pipein(urb->pipe) ? "IN" : "OUT", (unsigned long)urb, empty ? "empty" : "", epid);
10996 -
10997 -       /* Mark the urb as being in progress. */
10998 -       urb->status = -EINPROGRESS;
10999 -
11000 -       /* Setup the hcpriv data. */
11001 -       urb_priv = kzalloc(sizeof(etrax_urb_priv_t), KMALLOC_FLAG);
11002 -       assert(urb_priv != NULL);
11003 -       /* This sets rx_offset to 0. */
11004 -       urb_priv->urb_state = NOT_STARTED;
11005 -       urb->hcpriv = urb_priv;
11006 -
11007 -       if (empty) {
11008 -               etrax_usb_add_to_bulk_sb_list(urb, epid);
11009 -       }
11010 -
11011 -       DBFEXIT;
11012 -
11013 -       return 0;
11014 -}
11015 -
11016 -static void etrax_usb_add_to_bulk_sb_list(struct urb *urb, int epid)
11017 -{
11018 -       USB_SB_Desc_t *sb_desc;
11019 -       etrax_urb_priv_t *urb_priv = (etrax_urb_priv_t *)urb->hcpriv;
11020 -       unsigned long flags;
11021 -       char maxlen;
11022 -
11023 -       DBFENTER;
11024 -
11025 -       dbg_bulk("etrax_usb_add_to_bulk_sb_list, urb 0x%lx", (unsigned long)urb);
11026 -
11027 -       maxlen = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
11028 -
11029 -       sb_desc = (USB_SB_Desc_t*)kmem_cache_alloc(usb_desc_cache, SLAB_FLAG);
11030 -       assert(sb_desc != NULL);
11031 -       memset(sb_desc, 0, sizeof(USB_SB_Desc_t));
11032 -
11033 -
11034 -       if (usb_pipeout(urb->pipe)) {
11035 -
11036 -               dbg_bulk("Grabbing bulk OUT, urb 0x%lx, epid %d", (unsigned long)urb, epid);
11037 -
11038 -               /* This is probably a sanity check of the bulk transaction length
11039 -                  not being larger than 64 kB. */
11040 -               if (urb->transfer_buffer_length > 0xffff) {
11041 -                       panic("urb->transfer_buffer_length > 0xffff");
11042 -               }
11043 -
11044 -               sb_desc->sw_len = urb->transfer_buffer_length;
11045 -
11046 -               /* The rem field is don't care if it's not a full-length transfer, so setting
11047 -                  it shouldn't hurt. Also, rem isn't used for OUT traffic. */
11048 -               sb_desc->command = (IO_FIELD(USB_SB_command, rem, 0) |
11049 -                                   IO_STATE(USB_SB_command, tt, out) |
11050 -                                   IO_STATE(USB_SB_command, eot, yes) |
11051 -                                   IO_STATE(USB_SB_command, eol, yes));
11052 -
11053 -               /* The full field is set to yes, even if we don't actually check that this is
11054 -                  a full-length transfer (i.e., that transfer_buffer_length % maxlen = 0).
11055 -                  Setting full prevents the USB controller from sending an empty packet in
11056 -                  that case.  However, if URB_ZERO_PACKET was set we want that. */
11057 -               if (!(urb->transfer_flags & URB_ZERO_PACKET)) {
11058 -                       sb_desc->command |= IO_STATE(USB_SB_command, full, yes);
11059 -               }
11060 -
11061 -               sb_desc->buf = virt_to_phys(urb->transfer_buffer);
11062 -               sb_desc->next = 0;
11063 -
11064 -       } else if (usb_pipein(urb->pipe)) {
11065 -
11066 -               dbg_bulk("Grabbing bulk IN, urb 0x%lx, epid %d", (unsigned long)urb, epid);
11067 -
11068 -               sb_desc->sw_len = urb->transfer_buffer_length ?
11069 -                       (urb->transfer_buffer_length - 1) / maxlen + 1 : 0;
11070 -
11071 -               /* The rem field is don't care if it's not a full-length transfer, so setting
11072 -                  it shouldn't hurt. */
11073 -               sb_desc->command =
11074 -                       (IO_FIELD(USB_SB_command, rem,
11075 -                                 urb->transfer_buffer_length % maxlen) |
11076 -                        IO_STATE(USB_SB_command, tt, in) |
11077 -                        IO_STATE(USB_SB_command, eot, yes) |
11078 -                        IO_STATE(USB_SB_command, eol, yes));
11079 -
11080 -               sb_desc->buf = 0;
11081 -               sb_desc->next = 0;
11082 -       }
11083 -
11084 -       urb_priv->first_sb = sb_desc;
11085 -       urb_priv->last_sb = sb_desc;
11086 -       urb_priv->epid = epid;
11087 -
11088 -       urb->hcpriv = urb_priv;
11089 -
11090 -       /* Reset toggle bits and reset error count. */
11091 -       save_flags(flags);
11092 -       cli();
11093 -
11094 -       *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
11095 -       nop();
11096 -
11097 -       /* FIXME: Is this a special case since the hold field is checked,
11098 -          or should we check hold in a lot of other cases as well? */
11099 -       if (*R_USB_EPT_DATA & IO_MASK(R_USB_EPT_DATA, hold)) {
11100 -               panic("Hold was set in %s", __FUNCTION__);
11101 -       }
11102 -
11103 -       /* Reset error counters (regardless of which direction this traffic is). */
11104 -       *R_USB_EPT_DATA &=
11105 -               ~(IO_MASK(R_USB_EPT_DATA, error_count_in) |
11106 -                 IO_MASK(R_USB_EPT_DATA, error_count_out));
11107 -
11108 -       /* Software must preset the toggle bits. */
11109 -       if (usb_pipeout(urb->pipe)) {
11110 -               char toggle =
11111 -                       usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe), usb_pipeout(urb->pipe));
11112 -               *R_USB_EPT_DATA &= ~IO_MASK(R_USB_EPT_DATA, t_out);
11113 -               *R_USB_EPT_DATA |= IO_FIELD(R_USB_EPT_DATA, t_out, toggle);
11114 -       } else {
11115 -               char toggle =
11116 -                       usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe), usb_pipeout(urb->pipe));
11117 -               *R_USB_EPT_DATA &= ~IO_MASK(R_USB_EPT_DATA, t_in);
11118 -               *R_USB_EPT_DATA |= IO_FIELD(R_USB_EPT_DATA, t_in, toggle);
11119 -       }
11120 -
11121 -       /* Assert that the EP descriptor is disabled. */
11122 -       assert(!(TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)));
11123 -
11124 -       /* The reason we set the EP's sub pointer directly instead of
11125 -          walking the SB list and linking it last in the list is that we only
11126 -          have one active urb at a time (the rest are queued). */
11127 -
11128 -       /* Note that we cannot have interrupts running when we have set the SB descriptor
11129 -          but the EP is not yet enabled.  If a bulk eot happens for another EP, we will
11130 -          find this EP disabled and with a SB != 0, which will make us think that it's done. */
11131 -       TxBulkEPList[epid].sub = virt_to_phys(sb_desc);
11132 -       TxBulkEPList[epid].hw_len = 0;
11133 -       /* Note that we don't have to fill in the ep_id field since this
11134 -          was done when we allocated the EP descriptors in init_tx_bulk_ep. */
11135 -
11136 -       /* Check if the dummy list is already with us (if several urbs were queued). */
11137 -       if (TxBulkEPList[epid].next != virt_to_phys(&TxBulkDummyEPList[epid][0])) {
11138 -
11139 -               dbg_bulk("Inviting dummy list to the party for urb 0x%lx, epid %d",
11140 -                        (unsigned long)urb, epid);
11141 -
11142 -               /* The last EP in the dummy list already has its next pointer set to
11143 -                  TxBulkEPList[epid].next. */
11144 -
11145 -               /* We don't need to check if the DMA is at this EP or not before changing the
11146 -                  next pointer, since we will do it in one 32-bit write (EP descriptors are
11147 -                  32-bit aligned). */
11148 -               TxBulkEPList[epid].next = virt_to_phys(&TxBulkDummyEPList[epid][0]);
11149 -       }
11150 -       /* Enable the EP descr. */
11151 -       dbg_bulk("Enabling bulk EP for urb 0x%lx, epid %d", (unsigned long)urb, epid);
11152 -       TxBulkEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes);
11153 -
11154 -       /* Everything is set up, safe to enable interrupts again. */
11155 -       restore_flags(flags);
11156 -
11157 -       /* If the DMA bulk channel isn't running, we need to restart it if it
11158 -          has stopped at the last EP descriptor (DMA stopped because there was
11159 -          no more traffic) or if it has stopped at a dummy EP with the intr flag
11160 -          set (DMA stopped because we were too slow in inserting new traffic). */
11161 -       if (!(*R_DMA_CH8_SUB0_CMD & IO_MASK(R_DMA_CH8_SUB0_CMD, cmd))) {
11162 -
11163 -               USB_EP_Desc_t *ep;
11164 -               ep = (USB_EP_Desc_t *)phys_to_virt(*R_DMA_CH8_SUB0_EP);
11165 -               dbg_bulk("DMA channel not running in add");
11166 -               dbg_bulk("DMA is at 0x%lx", (unsigned long)ep);
11167 -
11168 -               if (*R_DMA_CH8_SUB0_EP == virt_to_phys(&TxBulkEPList[NBR_OF_EPIDS - 1]) ||
11169 -                   (ep->command & 0x8) >> 3) {
11170 -                       *R_DMA_CH8_SUB0_CMD = IO_STATE(R_DMA_CH8_SUB0_CMD, cmd, start);
11171 -                       /* Update/restart the bulk start timer since we just started the channel. */
11172 -                       mod_timer(&bulk_start_timer, jiffies + BULK_START_TIMER_INTERVAL);
11173 -                       /* Update/restart the bulk eot timer since we just inserted traffic. */
11174 -                       mod_timer(&bulk_eot_timer, jiffies + BULK_EOT_TIMER_INTERVAL);
11175 -               }
11176 -       }
11177 -
11178 -       DBFEXIT;
11179 -}
11180 -
11181 -static void etrax_usb_complete_bulk_urb(struct urb *urb, int status)
11182 -{
11183 -       etrax_urb_priv_t *urb_priv = (etrax_urb_priv_t *)urb->hcpriv;
11184 -       int epid = urb_priv->epid;
11185 -       unsigned long flags;
11186 -
11187 -       DBFENTER;
11188 -
11189 -       if (status)
11190 -               warn("Completing bulk urb with status %d.", status);
11191 -
11192 -       dbg_bulk("Completing bulk urb 0x%lx for epid %d", (unsigned long)urb, epid);
11193 -
11194 -       /* Update the urb list. */
11195 -       urb_list_del(urb, epid);
11196 -
11197 -       /* For an IN pipe, we always set the actual length, regardless of whether there was
11198 -          an error or not (which means the device driver can use the data if it wants to). */
11199 -       if (usb_pipein(urb->pipe)) {
11200 -               urb->actual_length = urb_priv->rx_offset;
11201 -       } else {
11202 -               /* Set actual_length for OUT urbs also; the USB mass storage driver seems
11203 -                  to want that. We wouldn't know of any partial writes if there was an error. */
11204 -               if (status == 0) {
11205 -                       urb->actual_length = urb->transfer_buffer_length;
11206 -               } else {
11207 -                       urb->actual_length = 0;
11208 -               }
11209 -       }
11210 -
11211 -       /* FIXME: Is there something of the things below we shouldn't do if there was an error?
11212 -          Like, maybe we shouldn't toggle the toggle bits, or maybe we shouldn't insert more traffic. */
11213 -
11214 -       save_flags(flags);
11215 -       cli();
11216 -
11217 -       *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
11218 -       nop();
11219 -
11220 -       /* We need to fiddle with the toggle bits because the hardware doesn't do it for us. */
11221 -       if (usb_pipeout(urb->pipe)) {
11222 -               char toggle =
11223 -                       IO_EXTRACT(R_USB_EPT_DATA, t_out, *R_USB_EPT_DATA);
11224 -               usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
11225 -                             usb_pipeout(urb->pipe), toggle);
11226 -       } else {
11227 -               char toggle =
11228 -                       IO_EXTRACT(R_USB_EPT_DATA, t_in, *R_USB_EPT_DATA);
11229 -               usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
11230 -                             usb_pipeout(urb->pipe), toggle);
11231 -       }
11232 -       restore_flags(flags);
11233 -
11234 -       /* Remember to free the SBs. */
11235 -       etrax_remove_from_sb_list(urb);
11236 -       kfree(urb_priv);
11237 -       urb->hcpriv = 0;
11238 -
11239 -       /* If there are any more urb's in the list we'd better start sending */
11240 -       if (!urb_list_empty(epid)) {
11241 -
11242 -               struct urb *new_urb;
11243 -
11244 -               /* Get the first urb. */
11245 -               new_urb = urb_list_first(epid);
11246 -               assert(new_urb);
11247 -
11248 -               dbg_bulk("More bulk for epid %d", epid);
11249 -
11250 -               etrax_usb_add_to_bulk_sb_list(new_urb, epid);
11251 -       }
11252 -
11253 -       urb->status = status;
11254 -
11255 -       /* We let any non-zero status from the layer above have precedence. */
11256 -       if (status == 0) {
11257 -               /* URB_SHORT_NOT_OK means that short reads (shorter than the endpoint's max length)
11258 -                  is to be treated as an error. */
11259 -               if (urb->transfer_flags & URB_SHORT_NOT_OK) {
11260 -                       if (usb_pipein(urb->pipe) &&
11261 -                           (urb->actual_length !=
11262 -                            usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe)))) {
11263 -                               urb->status = -EREMOTEIO;
11264 -                       }
11265 -               }
11266 -       }
11267 -
11268 -       if (urb->complete) {
11269 -               urb->complete(urb, NULL);
11270 -       }
11271 -
11272 -       if (urb_list_empty(epid)) {
11273 -               /* This means that this EP is now free, deconfigure it. */
11274 -               etrax_usb_free_epid(epid);
11275 -
11276 -               /* No more traffic; time to clean up.
11277 -                  Must set sub pointer to 0, since we look at the sub pointer when handling
11278 -                  the bulk eot interrupt. */
11279 -
11280 -               dbg_bulk("No bulk for epid %d", epid);
11281 -
11282 -               TxBulkEPList[epid].sub = 0;
11283 -
11284 -               /* Unlink the dummy list. */
11285 -
11286 -               dbg_bulk("Kicking dummy list out of party for urb 0x%lx, epid %d",
11287 -                        (unsigned long)urb, epid);
11288 -
11289 -               /* No need to wait for the DMA before changing the next pointer.
11290 -                  The modulo NBR_OF_EPIDS isn't actually necessary, since we will never use
11291 -                  the last one (INVALID_EPID) for actual traffic. */
11292 -               TxBulkEPList[epid].next =
11293 -                       virt_to_phys(&TxBulkEPList[(epid + 1) % NBR_OF_EPIDS]);
11294 -       }
11295 -
11296 -       DBFEXIT;
11297 -}
11298 -
11299 -static int etrax_usb_submit_ctrl_urb(struct urb *urb)
11300 -{
11301 -       int epid;
11302 -       int empty;
11303 -       unsigned long flags;
11304 -       etrax_urb_priv_t *urb_priv;
11305 -
11306 -       DBFENTER;
11307 -
11308 -       /* FIXME: Return -ENXIO if there is already a queued urb for this endpoint? */
11309 -
11310 -       /* Epid allocation, empty check and list add must be protected.
11311 -
11312 -          Epid allocation because if we find an existing epid for this endpoint an urb might be
11313 -          completed (emptying the list) before we add the new urb to the list, causing the epid
11314 -          to be de-allocated. We would then start the transfer with an invalid epid -> epid attn.
11315 -
11316 -          Empty check and add because otherwise we might conclude that the list is not empty,
11317 -          after which it becomes empty before we add the new urb to the list, causing us not to
11318 -          insert the new traffic into the SB list. */
11319 -
11320 -       spin_lock_irqsave(&urb_list_lock, flags);
11321 -       epid = etrax_usb_setup_epid(urb);
11322 -       if (epid == -1) {
11323 -               spin_unlock_irqrestore(&urb_list_lock, flags);
11324 -               DBFEXIT;
11325 -               return -ENOMEM;
11326 -       }
11327 -       empty = urb_list_empty(epid);
11328 -       urb_list_add(urb, epid);
11329 -       spin_unlock_irqrestore(&urb_list_lock, flags);
11330 -
11331 -       dbg_ctrl("Adding ctrl urb 0x%lx to %s list, epid %d",
11332 -                (unsigned long)urb, empty ? "empty" : "", epid);
11333 -
11334 -       /* Mark the urb as being in progress. */
11335 -       urb->status = -EINPROGRESS;
11336 -
11337 -       /* Setup the hcpriv data. */
11338 -       urb_priv = kzalloc(sizeof(etrax_urb_priv_t), KMALLOC_FLAG);
11339 -       assert(urb_priv != NULL);
11340 -       /* This sets rx_offset to 0. */
11341 -       urb_priv->urb_state = NOT_STARTED;
11342 -       urb->hcpriv = urb_priv;
11343 -
11344 -       if (empty) {
11345 -               etrax_usb_add_to_ctrl_sb_list(urb, epid);
11346 -       }
11347 -
11348 -       DBFEXIT;
11349 -
11350 -       return 0;
11351 -}
11352 -
11353 -static void etrax_usb_add_to_ctrl_sb_list(struct urb *urb, int epid)
11354 -{
11355 -       USB_SB_Desc_t *sb_desc_setup;
11356 -       USB_SB_Desc_t *sb_desc_data;
11357 -       USB_SB_Desc_t *sb_desc_status;
11358 -
11359 -       etrax_urb_priv_t *urb_priv = (etrax_urb_priv_t *)urb->hcpriv;
11360 -
11361 -       unsigned long flags;
11362 -       char maxlen;
11363 -
11364 -       DBFENTER;
11365 -
11366 -       maxlen = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
11367 -
11368 -       sb_desc_setup = (USB_SB_Desc_t*)kmem_cache_alloc(usb_desc_cache, SLAB_FLAG);
11369 -       assert(sb_desc_setup != NULL);
11370 -       sb_desc_status = (USB_SB_Desc_t*)kmem_cache_alloc(usb_desc_cache, SLAB_FLAG);
11371 -       assert(sb_desc_status != NULL);
11372 -
11373 -       /* Initialize the mandatory setup SB descriptor (used only in control transfers) */
11374 -       sb_desc_setup->sw_len = 8;
11375 -       sb_desc_setup->command = (IO_FIELD(USB_SB_command, rem, 0) |
11376 -                                 IO_STATE(USB_SB_command, tt, setup) |
11377 -                                 IO_STATE(USB_SB_command, full, yes) |
11378 -                                 IO_STATE(USB_SB_command, eot, yes));
11379 -
11380 -       sb_desc_setup->buf = virt_to_phys(urb->setup_packet);
11381 -
11382 -       if (usb_pipeout(urb->pipe)) {
11383 -               dbg_ctrl("Transfer for epid %d is OUT", epid);
11384 -
11385 -               /* If this Control OUT transfer has an optional data stage we add an OUT token
11386 -                  before the mandatory IN (status) token, hence the reordered SB list */
11387 -
11388 -               sb_desc_setup->next = virt_to_phys(sb_desc_status);
11389 -               if (urb->transfer_buffer) {
11390 -
11391 -                       dbg_ctrl("This OUT transfer has an extra data stage");
11392 -
11393 -                       sb_desc_data = (USB_SB_Desc_t*)kmem_cache_alloc(usb_desc_cache, SLAB_FLAG);
11394 -                       assert(sb_desc_data != NULL);
11395 -
11396 -                       sb_desc_setup->next = virt_to_phys(sb_desc_data);
11397 -
11398 -                       sb_desc_data->sw_len = urb->transfer_buffer_length;
11399 -                       sb_desc_data->command = (IO_STATE(USB_SB_command, tt, out) |
11400 -                                                IO_STATE(USB_SB_command, full, yes) |
11401 -                                                IO_STATE(USB_SB_command, eot, yes));
11402 -                       sb_desc_data->buf = virt_to_phys(urb->transfer_buffer);
11403 -                       sb_desc_data->next = virt_to_phys(sb_desc_status);
11404 -               }
11405 -
11406 -               sb_desc_status->sw_len = 1;
11407 -               sb_desc_status->command = (IO_FIELD(USB_SB_command, rem, 0) |
11408 -                                          IO_STATE(USB_SB_command, tt, in) |
11409 -                                          IO_STATE(USB_SB_command, eot, yes) |
11410 -                                          IO_STATE(USB_SB_command, intr, yes) |
11411 -                                          IO_STATE(USB_SB_command, eol, yes));
11412 -
11413 -               sb_desc_status->buf = 0;
11414 -               sb_desc_status->next = 0;
11415 -
11416 -       } else if (usb_pipein(urb->pipe)) {
11417 -
11418 -               dbg_ctrl("Transfer for epid %d is IN", epid);
11419 -               dbg_ctrl("transfer_buffer_length = %d", urb->transfer_buffer_length);
11420 -               dbg_ctrl("rem is calculated to %d", urb->transfer_buffer_length % maxlen);
11421 -
11422 -               sb_desc_data = (USB_SB_Desc_t*)kmem_cache_alloc(usb_desc_cache, SLAB_FLAG);
11423 -               assert(sb_desc_data != NULL);
11424 -
11425 -               sb_desc_setup->next = virt_to_phys(sb_desc_data);
11426 -
11427 -               sb_desc_data->sw_len = urb->transfer_buffer_length ?
11428 -                       (urb->transfer_buffer_length - 1) / maxlen + 1 : 0;
11429 -               dbg_ctrl("sw_len got %d", sb_desc_data->sw_len);
11430 -
11431 -               sb_desc_data->command =
11432 -                       (IO_FIELD(USB_SB_command, rem,
11433 -                                 urb->transfer_buffer_length % maxlen) |
11434 -                        IO_STATE(USB_SB_command, tt, in) |
11435 -                        IO_STATE(USB_SB_command, eot, yes));
11436 -
11437 -               sb_desc_data->buf = 0;
11438 -               sb_desc_data->next = virt_to_phys(sb_desc_status);
11439 -
11440 -               /* Read comment at zout_buffer declaration for an explanation to this. */
11441 -               sb_desc_status->sw_len = 1;
11442 -               sb_desc_status->command = (IO_FIELD(USB_SB_command, rem, 0) |
11443 -                                          IO_STATE(USB_SB_command, tt, zout) |
11444 -                                          IO_STATE(USB_SB_command, full, yes) |
11445 -                                          IO_STATE(USB_SB_command, eot, yes) |
11446 -                                          IO_STATE(USB_SB_command, intr, yes) |
11447 -                                          IO_STATE(USB_SB_command, eol, yes));
11448 -
11449 -               sb_desc_status->buf = virt_to_phys(&zout_buffer[0]);
11450 -               sb_desc_status->next = 0;
11451 -       }
11452 -
11453 -       urb_priv->first_sb = sb_desc_setup;
11454 -       urb_priv->last_sb = sb_desc_status;
11455 -       urb_priv->epid = epid;
11456 -
11457 -       urb_priv->urb_state = STARTED;
11458 -
11459 -       /* Reset toggle bits and reset error count, remember to di and ei */
11460 -       /* Warning: it is possible that this locking doesn't work with bottom-halves */
11461 -
11462 -       save_flags(flags);
11463 -       cli();
11464 -
11465 -       *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
11466 -       nop();
11467 -       if (*R_USB_EPT_DATA & IO_MASK(R_USB_EPT_DATA, hold)) {
11468 -               panic("Hold was set in %s", __FUNCTION__);
11469 -       }
11470 -
11471 -
11472 -       /* FIXME: Compare with etrax_usb_add_to_bulk_sb_list where the toggle bits
11473 -          are set to a specific value. Why the difference? Read "Transfer and Toggle Bits
11474 -          in Designer's Reference, p. 8 - 11. */
11475 -       *R_USB_EPT_DATA &=
11476 -               ~(IO_MASK(R_USB_EPT_DATA, error_count_in) |
11477 -                 IO_MASK(R_USB_EPT_DATA, error_count_out) |
11478 -                 IO_MASK(R_USB_EPT_DATA, t_in) |
11479 -                 IO_MASK(R_USB_EPT_DATA, t_out));
11480 -
11481 -       /* Since we use the rx interrupt to complete ctrl urbs, we can enable interrupts now
11482 -          (i.e. we don't check the sub pointer on an eot interrupt like we do for bulk traffic). */
11483 -       restore_flags(flags);
11484 -
11485 -       /* Assert that the EP descriptor is disabled. */
11486 -       assert(!(TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)));
11487 -
11488 -       /* Set up and enable the EP descriptor. */
11489 -       TxCtrlEPList[epid].sub = virt_to_phys(sb_desc_setup);
11490 -       TxCtrlEPList[epid].hw_len = 0;
11491 -       TxCtrlEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes);
11492 -
11493 -       /* We start the DMA sub channel without checking if it's running or not, because:
11494 -          1) If it's already running, issuing the start command is a nop.
11495 -          2) We avoid a test-and-set race condition. */
11496 -       *R_DMA_CH8_SUB1_CMD = IO_STATE(R_DMA_CH8_SUB1_CMD, cmd, start);
11497 -
11498 -       DBFEXIT;
11499 -}
11500 -
11501 -static void etrax_usb_complete_ctrl_urb(struct urb *urb, int status)
11502 -{
11503 -       etrax_urb_priv_t *urb_priv = (etrax_urb_priv_t *)urb->hcpriv;
11504 -       int epid = urb_priv->epid;
11505 -
11506 -       DBFENTER;
11507 -
11508 -       if (status)
11509 -               warn("Completing ctrl urb with status %d.", status);
11510 -
11511 -       dbg_ctrl("Completing ctrl epid %d, urb 0x%lx", epid, (unsigned long)urb);
11512 -
11513 -       /* Remove this urb from the list. */
11514 -       urb_list_del(urb, epid);
11515 -
11516 -       /* For an IN pipe, we always set the actual length, regardless of whether there was
11517 -          an error or not (which means the device driver can use the data if it wants to). */
11518 -       if (usb_pipein(urb->pipe)) {
11519 -               urb->actual_length = urb_priv->rx_offset;
11520 -       }
11521 -
11522 -       /* FIXME: Is there something of the things below we shouldn't do if there was an error?
11523 -          Like, maybe we shouldn't insert more traffic. */
11524 -
11525 -       /* Remember to free the SBs. */
11526 -       etrax_remove_from_sb_list(urb);
11527 -       kfree(urb_priv);
11528 -       urb->hcpriv = 0;
11529 -
11530 -       /* If there are any more urbs in the list we'd better start sending. */
11531 -       if (!urb_list_empty(epid)) {
11532 -               struct urb *new_urb;
11533 -
11534 -               /* Get the first urb. */
11535 -               new_urb = urb_list_first(epid);
11536 -               assert(new_urb);
11537 -
11538 -               dbg_ctrl("More ctrl for epid %d, first urb = 0x%lx", epid, (unsigned long)new_urb);
11539 -
11540 -               etrax_usb_add_to_ctrl_sb_list(new_urb, epid);
11541 -       }
11542 -
11543 -       urb->status = status;
11544 -
11545 -       /* We let any non-zero status from the layer above have precedence. */
11546 -       if (status == 0) {
11547 -               /* URB_SHORT_NOT_OK means that short reads (shorter than the endpoint's max length)
11548 -                  is to be treated as an error. */
11549 -               if (urb->transfer_flags & URB_SHORT_NOT_OK) {
11550 -                       if (usb_pipein(urb->pipe) &&
11551 -                           (urb->actual_length !=
11552 -                            usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe)))) {
11553 -                               urb->status = -EREMOTEIO;
11554 -                       }
11555 -               }
11556 -       }
11557 -
11558 -       if (urb->complete) {
11559 -               urb->complete(urb, NULL);
11560 -       }
11561 -
11562 -       if (urb_list_empty(epid)) {
11563 -               /* No more traffic. Time to clean up. */
11564 -               etrax_usb_free_epid(epid);
11565 -               /* Must set sub pointer to 0. */
11566 -               dbg_ctrl("No ctrl for epid %d", epid);
11567 -               TxCtrlEPList[epid].sub = 0;
11568 -       }
11569 -
11570 -       DBFEXIT;
11571 -}
11572 -
11573 -static int etrax_usb_submit_intr_urb(struct urb *urb)
11574 -{
11575 -
11576 -       int epid;
11577 -
11578 -       DBFENTER;
11579 -
11580 -       if (usb_pipeout(urb->pipe)) {
11581 -               /* Unsupported transfer type.
11582 -                  We don't support interrupt out traffic. (If we do, we can't support
11583 -                  intervals for neither in or out traffic, but are forced to schedule all
11584 -                  interrupt traffic in one frame.) */
11585 -               return -EINVAL;
11586 -       }
11587 -
11588 -       epid = etrax_usb_setup_epid(urb);
11589 -       if (epid == -1) {
11590 -               DBFEXIT;
11591 -               return -ENOMEM;
11592 -       }
11593 -
11594 -       if (!urb_list_empty(epid)) {
11595 -               /* There is already a queued urb for this endpoint. */
11596 -               etrax_usb_free_epid(epid);
11597 -               return -ENXIO;
11598 -       }
11599 -
11600 -       urb->status = -EINPROGRESS;
11601 -
11602 -       dbg_intr("Add intr urb 0x%lx, to list, epid %d", (unsigned long)urb, epid);
11603 -
11604 -       urb_list_add(urb, epid);
11605 -       etrax_usb_add_to_intr_sb_list(urb, epid);
11606 -
11607 -       return 0;
11608 -
11609 -       DBFEXIT;
11610 -}
11611 -
11612 -static void etrax_usb_add_to_intr_sb_list(struct urb *urb, int epid)
11613 -{
11614 -
11615 -       volatile USB_EP_Desc_t *tmp_ep;
11616 -       volatile USB_EP_Desc_t *first_ep;
11617 -
11618 -       char maxlen;
11619 -       int interval;
11620 -       int i;
11621 -
11622 -       etrax_urb_priv_t *urb_priv;
11623 -
11624 -       DBFENTER;
11625 -
11626 -       maxlen = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
11627 -       interval = urb->interval;
11628 -
11629 -       urb_priv = kzalloc(sizeof(etrax_urb_priv_t), KMALLOC_FLAG);
11630 -       assert(urb_priv != NULL);
11631 -       urb->hcpriv = urb_priv;
11632 -
11633 -       first_ep = &TxIntrEPList[0];
11634 -
11635 -       /* Round of the interval to 2^n, it is obvious that this code favours
11636 -          smaller numbers, but that is actually a good thing */
11637 -       /* FIXME: The "rounding error" for larger intervals will be quite
11638 -          large. For in traffic this shouldn't be a problem since it will only
11639 -          mean that we "poll" more often. */
11640 -       for (i = 0; interval; i++) {
11641 -               interval = interval >> 1;
11642 -       }
11643 -       interval = 1 << (i - 1);
11644 -
11645 -       dbg_intr("Interval rounded to %d", interval);
11646 -
11647 -       tmp_ep = first_ep;
11648 -       i = 0;
11649 -       do {
11650 -               if (tmp_ep->command & IO_MASK(USB_EP_command, eof)) {
11651 -                       if ((i % interval) == 0) {
11652 -                               /* Insert the traffic ep after tmp_ep */
11653 -                               USB_EP_Desc_t *ep_desc;
11654 -                               USB_SB_Desc_t *sb_desc;
11655 -
11656 -                               dbg_intr("Inserting EP for epid %d", epid);
11657 -
11658 -                               ep_desc = (USB_EP_Desc_t *)
11659 -                                       kmem_cache_alloc(usb_desc_cache, SLAB_FLAG);
11660 -                               sb_desc = (USB_SB_Desc_t *)
11661 -                                       kmem_cache_alloc(usb_desc_cache, SLAB_FLAG);
11662 -                               assert(ep_desc != NULL);
11663 -                               CHECK_ALIGN(ep_desc);
11664 -                               assert(sb_desc != NULL);
11665 -
11666 -                               ep_desc->sub = virt_to_phys(sb_desc);
11667 -                               ep_desc->hw_len = 0;
11668 -                               ep_desc->command = (IO_FIELD(USB_EP_command, epid, epid) |
11669 -                                                   IO_STATE(USB_EP_command, enable, yes));
11670 -
11671 -
11672 -                               /* Round upwards the number of packets of size maxlen
11673 -                                  that this SB descriptor should receive. */
11674 -                               sb_desc->sw_len = urb->transfer_buffer_length ?
11675 -                                       (urb->transfer_buffer_length - 1) / maxlen + 1 : 0;
11676 -                               sb_desc->next = 0;
11677 -                               sb_desc->buf = 0;
11678 -                               sb_desc->command =
11679 -                                       (IO_FIELD(USB_SB_command, rem, urb->transfer_buffer_length % maxlen) |
11680 -                                        IO_STATE(USB_SB_command, tt, in) |
11681 -                                        IO_STATE(USB_SB_command, eot, yes) |
11682 -                                        IO_STATE(USB_SB_command, eol, yes));
11683 -
11684 -                               ep_desc->next = tmp_ep->next;
11685 -                               tmp_ep->next = virt_to_phys(ep_desc);
11686 -                       }
11687 -                       i++;
11688 -               }
11689 -               tmp_ep = (USB_EP_Desc_t *)phys_to_virt(tmp_ep->next);
11690 -       } while (tmp_ep != first_ep);
11691 -
11692 -
11693 -       /* Note that first_sb/last_sb doesn't apply to interrupt traffic. */
11694 -       urb_priv->epid = epid;
11695 -
11696 -       /* We start the DMA sub channel without checking if it's running or not, because:
11697 -          1) If it's already running, issuing the start command is a nop.
11698 -          2) We avoid a test-and-set race condition. */
11699 -       *R_DMA_CH8_SUB2_CMD = IO_STATE(R_DMA_CH8_SUB2_CMD, cmd, start);
11700 -
11701 -       DBFEXIT;
11702 -}
11703 -
11704 -
11705 -
11706 -static void etrax_usb_complete_intr_urb(struct urb *urb, int status)
11707 -{
11708 -       etrax_urb_priv_t *urb_priv = (etrax_urb_priv_t *)urb->hcpriv;
11709 -       int epid = urb_priv->epid;
11710 -
11711 -       DBFENTER;
11712 -
11713 -       if (status)
11714 -               warn("Completing intr urb with status %d.", status);
11715 -
11716 -       dbg_intr("Completing intr epid %d, urb 0x%lx", epid, (unsigned long)urb);
11717 -
11718 -       urb->status = status;
11719 -       urb->actual_length = urb_priv->rx_offset;
11720 -
11721 -       dbg_intr("interrupt urb->actual_length = %d", urb->actual_length);
11722 -
11723 -       /* We let any non-zero status from the layer above have precedence. */
11724 -       if (status == 0) {
11725 -               /* URB_SHORT_NOT_OK means that short reads (shorter than the endpoint's max length)
11726 -                  is to be treated as an error. */
11727 -               if (urb->transfer_flags & URB_SHORT_NOT_OK) {
11728 -                       if (urb->actual_length !=
11729 -                           usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe))) {
11730 -                               urb->status = -EREMOTEIO;
11731 -                       }
11732 -               }
11733 -       }
11734 -
11735 -       /* The driver will resubmit the URB so we need to remove it first */
11736 -        etrax_usb_unlink_urb(urb, 0);
11737 -       if (urb->complete) {
11738 -               urb->complete(urb, NULL);
11739 -       }
11740 -
11741 -       DBFEXIT;
11742 -}
11743 -
11744 -
11745 -static int etrax_usb_submit_isoc_urb(struct urb *urb)
11746 -{
11747 -       int epid;
11748 -       unsigned long flags;
11749 -
11750 -       DBFENTER;
11751 -
11752 -       dbg_isoc("Submitting isoc urb = 0x%lx", (unsigned long)urb);
11753 -
11754 -       /* Epid allocation, empty check and list add must be protected.
11755 -          Read about this in etrax_usb_submit_ctrl_urb. */
11756 -
11757 -       spin_lock_irqsave(&urb_list_lock, flags);
11758 -       /* Is there an active epid for this urb ? */
11759 -       epid = etrax_usb_setup_epid(urb);
11760 -       if (epid == -1) {
11761 -               DBFEXIT;
11762 -               spin_unlock_irqrestore(&urb_list_lock, flags);
11763 -               return -ENOMEM;
11764 -       }
11765 -
11766 -       /* Ok, now we got valid endpoint, lets insert some traffic */
11767 -
11768 -       urb->status = -EINPROGRESS;
11769 -
11770 -       /* Find the last urb in the URB_List and add this urb after that one.
11771 -          Also add the traffic, that is do an etrax_usb_add_to_isoc_sb_list.  This
11772 -          is important to make this in "real time" since isochronous traffic is
11773 -          time sensitive. */
11774 -
11775 -       dbg_isoc("Adding isoc urb to (possibly empty) list");
11776 -       urb_list_add(urb, epid);
11777 -       etrax_usb_add_to_isoc_sb_list(urb, epid);
11778 -       spin_unlock_irqrestore(&urb_list_lock, flags);
11779 -
11780 -       DBFEXIT;
11781 -
11782 -       return 0;
11783 -}
11784 -
11785 -static void etrax_usb_check_error_isoc_ep(const int epid)
11786 -{
11787 -       unsigned long int flags;
11788 -       int error_code;
11789 -       __u32 r_usb_ept_data;
11790 -
11791 -       /* We can't read R_USB_EPID_ATTN here since it would clear the iso_eof,
11792 -          bulk_eot and epid_attn interrupts.  So we just check the status of
11793 -          the epid without testing if for it in R_USB_EPID_ATTN. */
11794 -
11795 -
11796 -       save_flags(flags);
11797 -       cli();
11798 -       *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
11799 -       nop();
11800 -       /* Note that although there are separate R_USB_EPT_DATA and R_USB_EPT_DATA_ISO
11801 -          registers, they are located at the same address and are of the same size.
11802 -          In other words, this read should be ok for isoc also. */
11803 -       r_usb_ept_data = *R_USB_EPT_DATA;
11804 -       restore_flags(flags);
11805 -
11806 -       error_code = IO_EXTRACT(R_USB_EPT_DATA_ISO, error_code, r_usb_ept_data);
11807 -
11808 -       if (r_usb_ept_data & IO_MASK(R_USB_EPT_DATA, hold)) {
11809 -               warn("Hold was set for epid %d.", epid);
11810 -               return;
11811 -       }
11812 -
11813 -       if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA_ISO, error_code, no_error)) {
11814 -
11815 -               /* This indicates that the SB list of the ept was completed before
11816 -                  new data was appended to it.  This is not an error, but indicates
11817 -                  large system or USB load and could possibly cause trouble for
11818 -                  very timing sensitive USB device drivers so we log it.
11819 -               */
11820 -               info("Isoc. epid %d disabled with no error", epid);
11821 -               return;
11822 -
11823 -       } else if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA_ISO, error_code, stall)) {
11824 -               /* Not really a protocol error, just says that the endpoint gave
11825 -                  a stall response. Note that error_code cannot be stall for isoc. */
11826 -               panic("Isoc traffic cannot stall");
11827 -
11828 -       } else if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA_ISO, error_code, bus_error)) {
11829 -               /* Two devices responded to a transaction request. Must be resolved
11830 -                  by software. FIXME: Reset ports? */
11831 -               panic("Bus error for epid %d."
11832 -                     " Two devices responded to transaction request",
11833 -                     epid);
11834 -
11835 -       } else if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA, error_code, buffer_error)) {
11836 -               /* DMA overrun or underrun. */
11837 -               warn("Buffer overrun/underrun for epid %d. DMA too busy?", epid);
11838 -
11839 -               /* It seems that error_code = buffer_error in
11840 -                  R_USB_EPT_DATA/R_USB_EPT_DATA_ISO and ourun = yes in R_USB_STATUS
11841 -                  are the same error. */
11842 -       }
11843 -}
11844 -
11845 -
11846 -static void etrax_usb_add_to_isoc_sb_list(struct urb *urb, int epid)
11847 -{
11848 -
11849 -       int i = 0;
11850 -
11851 -       etrax_urb_priv_t *urb_priv;
11852 -       USB_SB_Desc_t *prev_sb_desc,  *next_sb_desc, *temp_sb_desc;
11853 -
11854 -       DBFENTER;
11855 -
11856 -       prev_sb_desc = next_sb_desc = temp_sb_desc = NULL;
11857 -
11858 -       urb_priv = kzalloc(sizeof(etrax_urb_priv_t), GFP_ATOMIC);
11859 -       assert(urb_priv != NULL);
11860 -
11861 -       urb->hcpriv = urb_priv;
11862 -       urb_priv->epid = epid;
11863 -
11864 -       if (usb_pipeout(urb->pipe)) {
11865 -
11866 -               if (urb->number_of_packets == 0) panic("etrax_usb_add_to_isoc_sb_list 0 packets\n");
11867 -
11868 -               dbg_isoc("Transfer for epid %d is OUT", epid);
11869 -               dbg_isoc("%d packets in URB", urb->number_of_packets);
11870 -
11871 -               /* Create one SB descriptor for each packet and link them together. */
11872 -               for (i = 0; i < urb->number_of_packets; i++) {
11873 -                       if (!urb->iso_frame_desc[i].length)
11874 -                               continue;
11875 -
11876 -                       next_sb_desc = (USB_SB_Desc_t*)kmem_cache_alloc(usb_desc_cache, SLAB_ATOMIC);
11877 -                       assert(next_sb_desc != NULL);
11878 -
11879 -                       if (urb->iso_frame_desc[i].length > 0) {
11880 -
11881 -                               next_sb_desc->command = (IO_STATE(USB_SB_command, tt, out) |
11882 -                                                        IO_STATE(USB_SB_command, eot, yes));
11883 -
11884 -                               next_sb_desc->sw_len = urb->iso_frame_desc[i].length;
11885 -                               next_sb_desc->buf = virt_to_phys((char*)urb->transfer_buffer + urb->iso_frame_desc[i].offset);
11886 -
11887 -                               /* Check if full length transfer. */
11888 -                               if (urb->iso_frame_desc[i].length ==
11889 -                                   usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe))) {
11890 -                                       next_sb_desc->command |= IO_STATE(USB_SB_command, full, yes);
11891 -                               }
11892 -                       } else {
11893 -                               dbg_isoc("zero len packet");
11894 -                               next_sb_desc->command = (IO_FIELD(USB_SB_command, rem, 0) |
11895 -                                                        IO_STATE(USB_SB_command, tt, zout) |
11896 -                                                        IO_STATE(USB_SB_command, eot, yes) |
11897 -                                                        IO_STATE(USB_SB_command, full, yes));
11898 -
11899 -                               next_sb_desc->sw_len = 1;
11900 -                               next_sb_desc->buf = virt_to_phys(&zout_buffer[0]);
11901 -                       }
11902 -
11903 -                       /* First SB descriptor that belongs to this urb */
11904 -                       if (i == 0)
11905 -                               urb_priv->first_sb = next_sb_desc;
11906 -                       else
11907 -                               prev_sb_desc->next = virt_to_phys(next_sb_desc);
11908 -
11909 -                       prev_sb_desc = next_sb_desc;
11910 -               }
11911 -
11912 -               next_sb_desc->command |= (IO_STATE(USB_SB_command, intr, yes) |
11913 -                                         IO_STATE(USB_SB_command, eol, yes));
11914 -               next_sb_desc->next = 0;
11915 -               urb_priv->last_sb = next_sb_desc;
11916 -
11917 -       } else if (usb_pipein(urb->pipe)) {
11918 -
11919 -               dbg_isoc("Transfer for epid %d is IN", epid);
11920 -               dbg_isoc("transfer_buffer_length = %d", urb->transfer_buffer_length);
11921 -               dbg_isoc("rem is calculated to %d", urb->iso_frame_desc[urb->number_of_packets - 1].length);
11922 -
11923 -               /* Note that in descriptors for periodic traffic are not consumed. This means that
11924 -                  the USB controller never propagates in the SB list. In other words, if there already
11925 -                  is an SB descriptor in the list for this EP we don't have to do anything. */
11926 -               if (TxIsocEPList[epid].sub == 0) {
11927 -                       dbg_isoc("Isoc traffic not already running, allocating SB");
11928 -
11929 -                       next_sb_desc = (USB_SB_Desc_t*)kmem_cache_alloc(usb_desc_cache, SLAB_ATOMIC);
11930 -                       assert(next_sb_desc != NULL);
11931 -
11932 -                       next_sb_desc->command = (IO_STATE(USB_SB_command, tt, in) |
11933 -                                                IO_STATE(USB_SB_command, eot, yes) |
11934 -                                                IO_STATE(USB_SB_command, eol, yes));
11935 -
11936 -                       next_sb_desc->next = 0;
11937 -                       next_sb_desc->sw_len = 1; /* Actual number of packets is not relevant
11938 -                                                    for periodic in traffic as long as it is more
11939 -                                                    than zero.  Set to 1 always. */
11940 -                       next_sb_desc->buf = 0;
11941 -
11942 -                       /* The rem field is don't care for isoc traffic, so we don't set it. */
11943 -
11944 -                       /* Only one SB descriptor that belongs to this urb. */
11945 -                       urb_priv->first_sb = next_sb_desc;
11946 -                       urb_priv->last_sb = next_sb_desc;
11947 -
11948 -               } else {
11949 -
11950 -                       dbg_isoc("Isoc traffic already running, just setting first/last_sb");
11951 -
11952 -                       /* Each EP for isoc in will have only one SB descriptor, setup when submitting the
11953 -                          already active urb. Note that even though we may have several first_sb/last_sb
11954 -                          pointing at the same SB descriptor, they are freed only once (when the list has
11955 -                          become empty). */
11956 -                       urb_priv->first_sb = phys_to_virt(TxIsocEPList[epid].sub);
11957 -                       urb_priv->last_sb = phys_to_virt(TxIsocEPList[epid].sub);
11958 -                       return;
11959 -               }
11960 -
11961 -       }
11962 -
11963 -       /* Find the spot to insert this urb and add it. */
11964 -       if (TxIsocEPList[epid].sub == 0) {
11965 -               /* First SB descriptor inserted in this list (in or out). */
11966 -               dbg_isoc("Inserting SB desc first in list");
11967 -               TxIsocEPList[epid].hw_len = 0;
11968 -               TxIsocEPList[epid].sub = virt_to_phys(urb_priv->first_sb);
11969 -
11970 -       } else {
11971 -               /* Isochronous traffic is already running, insert new traffic last (only out). */
11972 -               dbg_isoc("Inserting SB desc last in list");
11973 -               temp_sb_desc = phys_to_virt(TxIsocEPList[epid].sub);
11974 -               while ((temp_sb_desc->command & IO_MASK(USB_SB_command, eol)) !=
11975 -                      IO_STATE(USB_SB_command, eol, yes)) {
11976 -                       assert(temp_sb_desc->next);
11977 -                       temp_sb_desc = phys_to_virt(temp_sb_desc->next);
11978 -               }
11979 -               dbg_isoc("Appending list on desc 0x%p", temp_sb_desc);
11980 -
11981 -               /* Next pointer must be set before eol is removed. */
11982 -               temp_sb_desc->next = virt_to_phys(urb_priv->first_sb);
11983 -               /* Clear the previous end of list flag since there is a new in the
11984 -                  added SB descriptor list. */
11985 -               temp_sb_desc->command &= ~IO_MASK(USB_SB_command, eol);
11986 -
11987 -               if (!(TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable))) {
11988 -                       /* 8.8.5 in Designer's Reference says we should check for and correct
11989 -                          any errors in the EP here.  That should not be necessary if epid_attn
11990 -                          is handled correctly, so we assume all is ok. */
11991 -                       dbg_isoc("EP disabled");
11992 -                       etrax_usb_check_error_isoc_ep(epid);
11993 -
11994 -                       /* The SB list was exhausted. */
11995 -                       if (virt_to_phys(urb_priv->last_sb) != TxIsocEPList[epid].sub) {
11996 -                               /* The new sublist did not get processed before the EP was
11997 -                                  disabled.  Setup the EP again. */
11998 -                               dbg_isoc("Set EP sub to new list");
11999 -                               TxIsocEPList[epid].hw_len = 0;
12000 -                               TxIsocEPList[epid].sub = virt_to_phys(urb_priv->first_sb);
12001 -                       }
12002 -               }
12003 -       }
12004 -
12005 -       if (urb->transfer_flags & URB_ISO_ASAP) {
12006 -               /* The isoc transfer should be started as soon as possible. The start_frame
12007 -                  field is a return value if URB_ISO_ASAP was set. Comparing R_USB_FM_NUMBER
12008 -                  with a USB Chief trace shows that the first isoc IN token is sent 2 frames
12009 -                  later. I'm not sure how this affects usage of the start_frame field by the
12010 -                  device driver, or how it affects things when USB_ISO_ASAP is not set, so
12011 -                  therefore there's no compensation for the 2 frame "lag" here. */
12012 -               urb->start_frame = (*R_USB_FM_NUMBER & 0x7ff);
12013 -               TxIsocEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes);
12014 -               urb_priv->urb_state = STARTED;
12015 -               dbg_isoc("URB_ISO_ASAP set, urb->start_frame set to %d", urb->start_frame);
12016 -       } else {
12017 -               /* Not started yet. */
12018 -               urb_priv->urb_state = NOT_STARTED;
12019 -               dbg_isoc("urb_priv->urb_state set to NOT_STARTED");
12020 -       }
12021 -
12022 -       /* We start the DMA sub channel without checking if it's running or not, because:
12023 -         1) If it's already running, issuing the start command is a nop.
12024 -         2) We avoid a test-and-set race condition. */
12025 -       *R_DMA_CH8_SUB3_CMD = IO_STATE(R_DMA_CH8_SUB3_CMD, cmd, start);
12026 -
12027 -       DBFEXIT;
12028 -}
12029 -
12030 -static void etrax_usb_complete_isoc_urb(struct urb *urb, int status)
12031 -{
12032 -       etrax_urb_priv_t *urb_priv = (etrax_urb_priv_t *)urb->hcpriv;
12033 -       int epid = urb_priv->epid;
12034 -       int auto_resubmit = 0;
12035 -
12036 -       DBFENTER;
12037 -       dbg_isoc("complete urb 0x%p, status %d", urb, status);
12038 -
12039 -       if (status)
12040 -               warn("Completing isoc urb with status %d.", status);
12041 -
12042 -       if (usb_pipein(urb->pipe)) {
12043 -               int i;
12044 -
12045 -               /* Make that all isoc packets have status and length set before
12046 -                  completing the urb. */
12047 -               for (i = urb_priv->isoc_packet_counter; i < urb->number_of_packets; i++) {
12048 -                       urb->iso_frame_desc[i].actual_length = 0;
12049 -                       urb->iso_frame_desc[i].status = -EPROTO;
12050 -               }
12051 -
12052 -               urb_list_del(urb, epid);
12053 -
12054 -               if (!list_empty(&urb_list[epid])) {
12055 -                       ((etrax_urb_priv_t *)(urb_list_first(epid)->hcpriv))->urb_state = STARTED;
12056 -               } else {
12057 -                       unsigned long int flags;
12058 -                       if (TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
12059 -                               /* The EP was enabled, disable it and wait. */
12060 -                               TxIsocEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
12061 -
12062 -                               /* Ah, the luxury of busy-wait. */
12063 -                               while (*R_DMA_CH8_SUB3_EP == virt_to_phys(&TxIsocEPList[epid]));
12064 -                       }
12065 -
12066 -                       etrax_remove_from_sb_list(urb);
12067 -                       TxIsocEPList[epid].sub = 0;
12068 -                       TxIsocEPList[epid].hw_len = 0;
12069 -
12070 -                       save_flags(flags);
12071 -                       cli();
12072 -                       etrax_usb_free_epid(epid);
12073 -                       restore_flags(flags);
12074 -               }
12075 -
12076 -               urb->hcpriv = 0;
12077 -               kfree(urb_priv);
12078 -
12079 -               /* Release allocated bandwidth. */
12080 -               usb_release_bandwidth(urb->dev, urb, 0);
12081 -       } else if (usb_pipeout(urb->pipe)) {
12082 -               int freed_descr;
12083 -
12084 -               dbg_isoc("Isoc out urb complete 0x%p", urb);
12085 -
12086 -               /* Update the urb list. */
12087 -               urb_list_del(urb, epid);
12088 -
12089 -               freed_descr = etrax_remove_from_sb_list(urb);
12090 -               dbg_isoc("freed %d descriptors of %d packets", freed_descr, urb->number_of_packets);
12091 -               assert(freed_descr == urb->number_of_packets);
12092 -               urb->hcpriv = 0;
12093 -               kfree(urb_priv);
12094 -
12095 -               /* Release allocated bandwidth. */
12096 -               usb_release_bandwidth(urb->dev, urb, 0);
12097 -       }
12098 -
12099 -       urb->status = status;
12100 -       if (urb->complete) {
12101 -               urb->complete(urb, NULL);
12102 -       }
12103 -
12104 -       if (auto_resubmit) {
12105 -               /* Check that urb was not unlinked by the complete callback. */
12106 -               if (__urb_list_entry(urb, epid)) {
12107 -                       /* Move this one down the list. */
12108 -                       urb_list_move_last(urb, epid);
12109 -
12110 -                       /* Mark the now first urb as started (may already be). */
12111 -                       ((etrax_urb_priv_t *)(urb_list_first(epid)->hcpriv))->urb_state = STARTED;
12112 -
12113 -                       /* Must set this to 0 since this urb is still active after
12114 -                          completion. */
12115 -                       urb_priv->isoc_packet_counter = 0;
12116 -               } else {
12117 -                       warn("(ISOC) automatic resubmit urb 0x%p removed by complete.", urb);
12118 -               }
12119 -       }
12120 -
12121 -       DBFEXIT;
12122 -}
12123 -
12124 -static void etrax_usb_complete_urb(struct urb *urb, int status)
12125 -{
12126 -       switch (usb_pipetype(urb->pipe)) {
12127 -       case PIPE_BULK:
12128 -               etrax_usb_complete_bulk_urb(urb, status);
12129 -               break;
12130 -       case PIPE_CONTROL:
12131 -               etrax_usb_complete_ctrl_urb(urb, status);
12132 -               break;
12133 -       case PIPE_INTERRUPT:
12134 -               etrax_usb_complete_intr_urb(urb, status);
12135 -               break;
12136 -       case PIPE_ISOCHRONOUS:
12137 -               etrax_usb_complete_isoc_urb(urb, status);
12138 -               break;
12139 -       default:
12140 -               err("Unknown pipetype");
12141 -       }
12142 -}
12143 -
12144 -
12145 -
12146 -static irqreturn_t etrax_usb_hc_interrupt_top_half(int irq, void *vhc)
12147 -{
12148 -       usb_interrupt_registers_t *reg;
12149 -       unsigned long flags;
12150 -       __u32 irq_mask;
12151 -       __u8 status;
12152 -       __u32 epid_attn;
12153 -       __u16 port_status_1;
12154 -       __u16 port_status_2;
12155 -       __u32 fm_number;
12156 -
12157 -       DBFENTER;
12158 -
12159 -       /* Read critical registers into local variables, do kmalloc afterwards. */
12160 -       save_flags(flags);
12161 -       cli();
12162 -
12163 -       irq_mask = *R_USB_IRQ_MASK_READ;
12164 -       /* Reading R_USB_STATUS clears the ctl_status interrupt. Note that R_USB_STATUS
12165 -          must be read before R_USB_EPID_ATTN since reading the latter clears the
12166 -          ourun and perror fields of R_USB_STATUS. */
12167 -       status = *R_USB_STATUS;
12168 -
12169 -       /* Reading R_USB_EPID_ATTN clears the iso_eof, bulk_eot and epid_attn interrupts. */
12170 -       epid_attn = *R_USB_EPID_ATTN;
12171 -
12172 -       /* Reading R_USB_RH_PORT_STATUS_1 and R_USB_RH_PORT_STATUS_2 clears the
12173 -          port_status interrupt. */
12174 -       port_status_1 = *R_USB_RH_PORT_STATUS_1;
12175 -       port_status_2 = *R_USB_RH_PORT_STATUS_2;
12176 -
12177 -       /* Reading R_USB_FM_NUMBER clears the sof interrupt. */
12178 -       /* Note: the lower 11 bits contain the actual frame number, sent with each sof. */
12179 -       fm_number = *R_USB_FM_NUMBER;
12180 -
12181 -       restore_flags(flags);
12182 -
12183 -       reg = (usb_interrupt_registers_t *)kmem_cache_alloc(top_half_reg_cache, SLAB_ATOMIC);
12184 -
12185 -       assert(reg != NULL);
12186 -
12187 -       reg->hc = (etrax_hc_t *)vhc;
12188 -
12189 -       /* Now put register values into kmalloc'd area. */
12190 -       reg->r_usb_irq_mask_read = irq_mask;
12191 -       reg->r_usb_status = status;
12192 -       reg->r_usb_epid_attn = epid_attn;
12193 -       reg->r_usb_rh_port_status_1 = port_status_1;
12194 -       reg->r_usb_rh_port_status_2 = port_status_2;
12195 -       reg->r_usb_fm_number = fm_number;
12196 -
12197 -        INIT_WORK(&reg->usb_bh, etrax_usb_hc_interrupt_bottom_half, reg);
12198 -        schedule_work(&reg->usb_bh);
12199 -
12200 -       DBFEXIT;
12201 -
12202 -        return IRQ_HANDLED;
12203 -}
12204 -
12205 -static void etrax_usb_hc_interrupt_bottom_half(void *data)
12206 -{
12207 -       usb_interrupt_registers_t *reg = (usb_interrupt_registers_t *)data;
12208 -       __u32 irq_mask = reg->r_usb_irq_mask_read;
12209 -
12210 -       DBFENTER;
12211 -
12212 -       /* Interrupts are handled in order of priority. */
12213 -       if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, epid_attn)) {
12214 -               etrax_usb_hc_epid_attn_interrupt(reg);
12215 -       }
12216 -       if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, port_status)) {
12217 -               etrax_usb_hc_port_status_interrupt(reg);
12218 -       }
12219 -       if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, ctl_status)) {
12220 -               etrax_usb_hc_ctl_status_interrupt(reg);
12221 -       }
12222 -       if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, iso_eof)) {
12223 -               etrax_usb_hc_isoc_eof_interrupt();
12224 -       }
12225 -       if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, bulk_eot)) {
12226 -               /* Update/restart the bulk start timer since obviously the channel is running. */
12227 -               mod_timer(&bulk_start_timer, jiffies + BULK_START_TIMER_INTERVAL);
12228 -               /* Update/restart the bulk eot timer since we just received an bulk eot interrupt. */
12229 -               mod_timer(&bulk_eot_timer, jiffies + BULK_EOT_TIMER_INTERVAL);
12230 -
12231 -               etrax_usb_hc_bulk_eot_interrupt(0);
12232 -       }
12233 -
12234 -       kmem_cache_free(top_half_reg_cache, reg);
12235 -
12236 -       DBFEXIT;
12237 -}
12238 -
12239 -
12240 -void etrax_usb_hc_isoc_eof_interrupt(void)
12241 -{
12242 -       struct urb *urb;
12243 -       etrax_urb_priv_t *urb_priv;
12244 -       int epid;
12245 -       unsigned long flags;
12246 -
12247 -       DBFENTER;
12248 -
12249 -       /* Do not check the invalid epid (it has a valid sub pointer). */
12250 -       for (epid = 0; epid < NBR_OF_EPIDS - 1; epid++) {
12251 -
12252 -               /* Do not check the invalid epid (it has a valid sub pointer). */
12253 -               if ((epid == DUMMY_EPID) || (epid == INVALID_EPID))
12254 -                       continue;
12255 -
12256 -               /* Disable interrupts to block the isoc out descriptor interrupt handler
12257 -                  from being called while the isoc EPID list is being checked.
12258 -               */
12259 -               save_flags(flags);
12260 -               cli();
12261 -
12262 -               if (TxIsocEPList[epid].sub == 0) {
12263 -                       /* Nothing here to see. */
12264 -                       restore_flags(flags);
12265 -                       continue;
12266 -               }
12267 -
12268 -               /* Get the first urb (if any). */
12269 -               urb = urb_list_first(epid);
12270 -               if (urb == 0) {
12271 -                       warn("Ignoring NULL urb");
12272 -                       restore_flags(flags);
12273 -                       continue;
12274 -               }
12275 -               if (usb_pipein(urb->pipe)) {
12276 -
12277 -                       /* Sanity check. */
12278 -                       assert(usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS);
12279 -
12280 -                       urb_priv = (etrax_urb_priv_t *)urb->hcpriv;
12281 -                       assert(urb_priv);
12282 -
12283 -                       if (urb_priv->urb_state == NOT_STARTED) {
12284 -
12285 -                               /* If ASAP is not set and urb->start_frame is the current frame,
12286 -                                  start the transfer. */
12287 -                               if (!(urb->transfer_flags & URB_ISO_ASAP) &&
12288 -                                   (urb->start_frame == (*R_USB_FM_NUMBER & 0x7ff))) {
12289 -
12290 -                                       dbg_isoc("Enabling isoc IN EP descr for epid %d", epid);
12291 -                                       TxIsocEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes);
12292 -
12293 -                                       /* This urb is now active. */
12294 -                                       urb_priv->urb_state = STARTED;
12295 -                                       continue;
12296 -                               }
12297 -                       }
12298 -               }
12299 -               restore_flags(flags);
12300 -       }
12301 -
12302 -       DBFEXIT;
12303 -
12304 -}
12305 -
12306 -void etrax_usb_hc_bulk_eot_interrupt(int timer_induced)
12307 -{
12308 -       int epid;
12309 -
12310 -       /* The technique is to run one urb at a time, wait for the eot interrupt at which
12311 -          point the EP descriptor has been disabled. */
12312 -
12313 -       DBFENTER;
12314 -       dbg_bulk("bulk eot%s", timer_induced ? ", called by timer" : "");
12315 -
12316 -       for (epid = 0; epid < NBR_OF_EPIDS; epid++) {
12317 -
12318 -               if (!(TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) &&
12319 -                   (TxBulkEPList[epid].sub != 0)) {
12320 -
12321 -                       struct urb *urb;
12322 -                       etrax_urb_priv_t *urb_priv;
12323 -                       unsigned long flags;
12324 -                       __u32 r_usb_ept_data;
12325 -
12326 -                       /* Found a disabled EP descriptor which has a non-null sub pointer.
12327 -                          Verify that this ctrl EP descriptor got disabled no errors.
12328 -                          FIXME: Necessary to check error_code? */
12329 -                       dbg_bulk("for epid %d?", epid);
12330 -
12331 -                       /* Get the first urb. */
12332 -                       urb = urb_list_first(epid);
12333 -
12334 -                       /* FIXME: Could this happen for valid reasons? Why did it disappear? Because of
12335 -                          wrong unlinking? */
12336 -                       if (!urb) {
12337 -                               warn("NULL urb for epid %d", epid);
12338 -                               continue;
12339 -                       }
12340 -
12341 -                       assert(urb);
12342 -                       urb_priv = (etrax_urb_priv_t *)urb->hcpriv;
12343 -                       assert(urb_priv);
12344 -
12345 -                       /* Sanity checks. */
12346 -                       assert(usb_pipetype(urb->pipe) == PIPE_BULK);
12347 -                       if (phys_to_virt(TxBulkEPList[epid].sub) != urb_priv->last_sb) {
12348 -                               err("bulk endpoint got disabled before reaching last sb");
12349 -                       }
12350 -
12351 -                       /* For bulk IN traffic, there seems to be a race condition between
12352 -                          between the bulk eot and eop interrupts, or rather an uncertainty regarding
12353 -                          the order in which they happen. Normally we expect the eop interrupt from
12354 -                          DMA channel 9 to happen before the eot interrupt.
12355 -
12356 -                          Therefore, we complete the bulk IN urb in the rx interrupt handler instead. */
12357 -
12358 -                       if (usb_pipein(urb->pipe)) {
12359 -                               dbg_bulk("in urb, continuing");
12360 -                               continue;
12361 -                       }
12362 -
12363 -                       save_flags(flags);
12364 -                       cli();
12365 -                       *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
12366 -                       nop();
12367 -                       r_usb_ept_data = *R_USB_EPT_DATA;
12368 -                       restore_flags(flags);
12369 -
12370 -                       if (IO_EXTRACT(R_USB_EPT_DATA, error_code, r_usb_ept_data) ==
12371 -                           IO_STATE_VALUE(R_USB_EPT_DATA, error_code, no_error)) {
12372 -                               /* This means that the endpoint has no error, is disabled
12373 -                                  and had inserted traffic, i.e. transfer successfully completed. */
12374 -                               etrax_usb_complete_bulk_urb(urb, 0);
12375 -                       } else {
12376 -                               /* Shouldn't happen. We expect errors to be caught by epid attention. */
12377 -                               err("Found disabled bulk EP desc, error_code != no_error");
12378 -                       }
12379 -               }
12380 -       }
12381 -
12382 -       /* Normally, we should find (at least) one disabled EP descriptor with a valid sub pointer.
12383 -          However, because of the uncertainty in the deliverance of the eop/eot interrupts, we may
12384 -          not.  Also, we might find two disabled EPs when handling an eot interrupt, and then find
12385 -          none the next time. */
12386 -
12387 -       DBFEXIT;
12388 -
12389 -}
12390 -
12391 -void etrax_usb_hc_epid_attn_interrupt(usb_interrupt_registers_t *reg)
12392 -{
12393 -       /* This function handles the epid attention interrupt.  There are a variety of reasons
12394 -          for this interrupt to happen (Designer's Reference, p. 8 - 22 for the details):
12395 -
12396 -          invalid ep_id  - Invalid epid in an EP (EP disabled).
12397 -          stall          - Not strictly an error condition (EP disabled).
12398 -          3rd error      - Three successive transaction errors  (EP disabled).
12399 -          buffer ourun   - Buffer overrun or underrun (EP disabled).
12400 -          past eof1      - Intr or isoc transaction proceeds past EOF1.
12401 -          near eof       - Intr or isoc transaction would not fit inside the frame.
12402 -          zout transfer  - If zout transfer for a bulk endpoint (EP disabled).
12403 -          setup transfer - If setup transfer for a non-ctrl endpoint (EP disabled). */
12404 -
12405 -       int epid;
12406 -
12407 -
12408 -       DBFENTER;
12409 -
12410 -       assert(reg != NULL);
12411 -
12412 -       /* Note that we loop through all epids. We still want to catch errors for
12413 -          the invalid one, even though we might handle them differently. */
12414 -       for (epid = 0; epid < NBR_OF_EPIDS; epid++) {
12415 -
12416 -               if (test_bit(epid, (void *)&reg->r_usb_epid_attn)) {
12417 -
12418 -                       struct urb *urb;
12419 -                       __u32 r_usb_ept_data;
12420 -                       unsigned long flags;
12421 -                       int error_code;
12422 -
12423 -                       save_flags(flags);
12424 -                       cli();
12425 -                       *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
12426 -                       nop();
12427 -                       /* Note that although there are separate R_USB_EPT_DATA and R_USB_EPT_DATA_ISO
12428 -                          registers, they are located at the same address and are of the same size.
12429 -                          In other words, this read should be ok for isoc also. */
12430 -                       r_usb_ept_data = *R_USB_EPT_DATA;
12431 -                       restore_flags(flags);
12432 -
12433 -                       /* First some sanity checks. */
12434 -                       if (epid == INVALID_EPID) {
12435 -                               /* FIXME: What if it became disabled? Could seriously hurt interrupt
12436 -                                  traffic. (Use do_intr_recover.) */
12437 -                               warn("Got epid_attn for INVALID_EPID (%d).", epid);
12438 -                               err("R_USB_EPT_DATA = 0x%x", r_usb_ept_data);
12439 -                               err("R_USB_STATUS = 0x%x", reg->r_usb_status);
12440 -                               continue;
12441 -                       } else  if (epid == DUMMY_EPID) {
12442 -                               /* We definitely don't care about these ones. Besides, they are
12443 -                                  always disabled, so any possible disabling caused by the
12444 -                                  epid attention interrupt is irrelevant. */
12445 -                               warn("Got epid_attn for DUMMY_EPID (%d).", epid);
12446 -                               continue;
12447 -                       }
12448 -
12449 -                       /* Get the first urb in the urb list for this epid. We blatantly assume
12450 -                          that only the first urb could have caused the epid attention.
12451 -                          (For bulk and ctrl, only one urb is active at any one time. For intr
12452 -                          and isoc we remove them once they are completed.) */
12453 -                       urb = urb_list_first(epid);
12454 -
12455 -                       if (urb == NULL) {
12456 -                               err("Got epid_attn for epid %i with no urb.", epid);
12457 -                               err("R_USB_EPT_DATA = 0x%x", r_usb_ept_data);
12458 -                               err("R_USB_STATUS = 0x%x", reg->r_usb_status);
12459 -                               continue;
12460 -                       }
12461 -
12462 -                       switch (usb_pipetype(urb->pipe)) {
12463 -                       case PIPE_BULK:
12464 -                               warn("Got epid attn for bulk endpoint, epid %d", epid);
12465 -                               break;
12466 -                       case PIPE_CONTROL:
12467 -                               warn("Got epid attn for control endpoint, epid %d", epid);
12468 -                               break;
12469 -                       case PIPE_INTERRUPT:
12470 -                               warn("Got epid attn for interrupt endpoint, epid %d", epid);
12471 -                               break;
12472 -                       case PIPE_ISOCHRONOUS:
12473 -                               warn("Got epid attn for isochronous endpoint, epid %d", epid);
12474 -                               break;
12475 -                       }
12476 -
12477 -                       if (usb_pipetype(urb->pipe) != PIPE_ISOCHRONOUS) {
12478 -                               if (r_usb_ept_data & IO_MASK(R_USB_EPT_DATA, hold)) {
12479 -                                       warn("Hold was set for epid %d.", epid);
12480 -                                       continue;
12481 -                               }
12482 -                       }
12483 -
12484 -                       /* Even though error_code occupies bits 22 - 23 in both R_USB_EPT_DATA and
12485 -                          R_USB_EPT_DATA_ISOC, we separate them here so we don't forget in other places. */
12486 -                       if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
12487 -                               error_code = IO_EXTRACT(R_USB_EPT_DATA_ISO, error_code, r_usb_ept_data);
12488 -                       } else {
12489 -                               error_code = IO_EXTRACT(R_USB_EPT_DATA, error_code, r_usb_ept_data);
12490 -                       }
12491 -
12492 -                       /* Using IO_STATE_VALUE on R_USB_EPT_DATA should be ok for isoc also. */
12493 -                       if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA, error_code, no_error)) {
12494 -
12495 -                               /* Isoc traffic doesn't have error_count_in/error_count_out. */
12496 -                               if ((usb_pipetype(urb->pipe) != PIPE_ISOCHRONOUS) &&
12497 -                                   (IO_EXTRACT(R_USB_EPT_DATA, error_count_in, r_usb_ept_data) == 3 ||
12498 -                                    IO_EXTRACT(R_USB_EPT_DATA, error_count_out, r_usb_ept_data) == 3)) {
12499 -                                       /* 3rd error. */
12500 -                                       warn("3rd error for epid %i", epid);
12501 -                                       etrax_usb_complete_urb(urb, -EPROTO);
12502 -
12503 -                               } else if (reg->r_usb_status & IO_MASK(R_USB_STATUS, perror)) {
12504 -
12505 -                                       warn("Perror for epid %d", epid);
12506 -
12507 -                                       if (!(r_usb_ept_data & IO_MASK(R_USB_EPT_DATA, valid))) {
12508 -                                               /* invalid ep_id */
12509 -                                               panic("Perror because of invalid epid."
12510 -                                                     " Deconfigured too early?");
12511 -                                       } else {
12512 -                                               /* past eof1, near eof, zout transfer, setup transfer */
12513 -
12514 -                                               /* Dump the urb and the relevant EP descriptor list. */
12515 -
12516 -                                               __dump_urb(urb);
12517 -                                               __dump_ept_data(epid);
12518 -                                               __dump_ep_list(usb_pipetype(urb->pipe));
12519 -
12520 -                                               panic("Something wrong with DMA descriptor contents."
12521 -                                                     " Too much traffic inserted?");
12522 -                                       }
12523 -                               } else if (reg->r_usb_status & IO_MASK(R_USB_STATUS, ourun)) {
12524 -                                       /* buffer ourun */
12525 -                                       panic("Buffer overrun/underrun for epid %d. DMA too busy?", epid);
12526 -                               }
12527 -
12528 -                       } else if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA, error_code, stall)) {
12529 -                               /* Not really a protocol error, just says that the endpoint gave
12530 -                                  a stall response. Note that error_code cannot be stall for isoc. */
12531 -                               if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
12532 -                                       panic("Isoc traffic cannot stall");
12533 -                               }
12534 -
12535 -                               warn("Stall for epid %d", epid);
12536 -                               etrax_usb_complete_urb(urb, -EPIPE);
12537 -
12538 -                       } else if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA, error_code, bus_error)) {
12539 -                               /* Two devices responded to a transaction request. Must be resolved
12540 -                                  by software. FIXME: Reset ports? */
12541 -                               panic("Bus error for epid %d."
12542 -                                     " Two devices responded to transaction request",
12543 -                                     epid);
12544 -
12545 -                       } else if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA, error_code, buffer_error)) {
12546 -                               /* DMA overrun or underrun. */
12547 -                               warn("Buffer overrun/underrun for epid %d. DMA too busy?", epid);
12548 -
12549 -                               /* It seems that error_code = buffer_error in
12550 -                                  R_USB_EPT_DATA/R_USB_EPT_DATA_ISO and ourun = yes in R_USB_STATUS
12551 -                                  are the same error. */
12552 -                               etrax_usb_complete_urb(urb, -EPROTO);
12553 -                       }
12554 -               }
12555 -       }
12556 -
12557 -       DBFEXIT;
12558 -
12559 -}
12560 -
12561 -void etrax_usb_bulk_start_timer_func(unsigned long dummy)
12562 -{
12563 -
12564 -       /* We might enable an EP descriptor behind the current DMA position when it's about
12565 -          to decide that there are no more bulk traffic and it should stop the bulk channel.
12566 -          Therefore we periodically check if the bulk channel is stopped and there is an
12567 -          enabled bulk EP descriptor, in which case we start the bulk channel. */
12568 -       dbg_bulk("bulk_start_timer timed out.");
12569 -
12570 -       if (!(*R_DMA_CH8_SUB0_CMD & IO_MASK(R_DMA_CH8_SUB0_CMD, cmd))) {
12571 -               int epid;
12572 -
12573 -               dbg_bulk("Bulk DMA channel not running.");
12574 -
12575 -               for (epid = 0; epid < NBR_OF_EPIDS; epid++) {
12576 -                       if (TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
12577 -                               dbg_bulk("Found enabled EP for epid %d, starting bulk channel.\n",
12578 -                                        epid);
12579 -                               *R_DMA_CH8_SUB0_CMD = IO_STATE(R_DMA_CH8_SUB0_CMD, cmd, start);
12580 -
12581 -                               /* Restart the bulk eot timer since we just started the bulk channel. */
12582 -                               mod_timer(&bulk_eot_timer, jiffies + BULK_EOT_TIMER_INTERVAL);
12583 -
12584 -                               /* No need to search any further. */
12585 -                               break;
12586 -                       }
12587 -               }
12588 -       } else {
12589 -               dbg_bulk("Bulk DMA channel running.");
12590 -       }
12591 -}
12592 -
12593 -void etrax_usb_hc_port_status_interrupt(usb_interrupt_registers_t *reg)
12594 -{
12595 -       etrax_hc_t *hc = reg->hc;
12596 -       __u16 r_usb_rh_port_status_1 = reg->r_usb_rh_port_status_1;
12597 -       __u16 r_usb_rh_port_status_2 = reg->r_usb_rh_port_status_2;
12598 -
12599 -       DBFENTER;
12600 -
12601 -       /* The Etrax RH does not include a wPortChange register, so this has to be handled in software
12602 -          (by saving the old port status value for comparison when the port status interrupt happens).
12603 -          See section 11.16.2.6.2 in the USB 1.1 spec for details. */
12604 -
12605 -       dbg_rh("hc->rh.prev_wPortStatus_1 = 0x%x", hc->rh.prev_wPortStatus_1);
12606 -       dbg_rh("hc->rh.prev_wPortStatus_2 = 0x%x", hc->rh.prev_wPortStatus_2);
12607 -       dbg_rh("r_usb_rh_port_status_1 = 0x%x", r_usb_rh_port_status_1);
12608 -       dbg_rh("r_usb_rh_port_status_2 = 0x%x", r_usb_rh_port_status_2);
12609 -
12610 -       /* C_PORT_CONNECTION is set on any transition. */
12611 -       hc->rh.wPortChange_1 |=
12612 -               ((r_usb_rh_port_status_1 & (1 << RH_PORT_CONNECTION)) !=
12613 -                (hc->rh.prev_wPortStatus_1 & (1 << RH_PORT_CONNECTION))) ?
12614 -               (1 << RH_PORT_CONNECTION) : 0;
12615 -
12616 -       hc->rh.wPortChange_2 |=
12617 -               ((r_usb_rh_port_status_2 & (1 << RH_PORT_CONNECTION)) !=
12618 -                (hc->rh.prev_wPortStatus_2 & (1 << RH_PORT_CONNECTION))) ?
12619 -               (1 << RH_PORT_CONNECTION) : 0;
12620 -
12621 -       /* C_PORT_ENABLE is _only_ set on a one to zero transition, i.e. when
12622 -          the port is disabled, not when it's enabled. */
12623 -       hc->rh.wPortChange_1 |=
12624 -               ((hc->rh.prev_wPortStatus_1 & (1 << RH_PORT_ENABLE))
12625 -                && !(r_usb_rh_port_status_1 & (1 << RH_PORT_ENABLE))) ?
12626 -               (1 << RH_PORT_ENABLE) : 0;
12627 -
12628 -       hc->rh.wPortChange_2 |=
12629 -               ((hc->rh.prev_wPortStatus_2 & (1 << RH_PORT_ENABLE))
12630 -                && !(r_usb_rh_port_status_2 & (1 << RH_PORT_ENABLE))) ?
12631 -               (1 << RH_PORT_ENABLE) : 0;
12632 -
12633 -       /* C_PORT_SUSPEND is set to one when the device has transitioned out
12634 -          of the suspended state, i.e. when suspend goes from one to zero. */
12635 -       hc->rh.wPortChange_1 |=
12636 -               ((hc->rh.prev_wPortStatus_1 & (1 << RH_PORT_SUSPEND))
12637 -                && !(r_usb_rh_port_status_1 & (1 << RH_PORT_SUSPEND))) ?
12638 -               (1 << RH_PORT_SUSPEND) : 0;
12639 -
12640 -       hc->rh.wPortChange_2 |=
12641 -               ((hc->rh.prev_wPortStatus_2 & (1 << RH_PORT_SUSPEND))
12642 -                && !(r_usb_rh_port_status_2 & (1 << RH_PORT_SUSPEND))) ?
12643 -               (1 << RH_PORT_SUSPEND) : 0;
12644 -
12645 -
12646 -       /* C_PORT_RESET is set when reset processing on this port is complete. */
12647 -       hc->rh.wPortChange_1 |=
12648 -               ((hc->rh.prev_wPortStatus_1 & (1 << RH_PORT_RESET))
12649 -                && !(r_usb_rh_port_status_1 & (1 << RH_PORT_RESET))) ?
12650 -               (1 << RH_PORT_RESET) : 0;
12651 -
12652 -       hc->rh.wPortChange_2 |=
12653 -               ((hc->rh.prev_wPortStatus_2 & (1 << RH_PORT_RESET))
12654 -                && !(r_usb_rh_port_status_2 & (1 << RH_PORT_RESET))) ?
12655 -               (1 << RH_PORT_RESET) : 0;
12656 -
12657 -       /* Save the new values for next port status change. */
12658 -       hc->rh.prev_wPortStatus_1 = r_usb_rh_port_status_1;
12659 -       hc->rh.prev_wPortStatus_2 = r_usb_rh_port_status_2;
12660 -
12661 -       dbg_rh("hc->rh.wPortChange_1 set to 0x%x", hc->rh.wPortChange_1);
12662 -       dbg_rh("hc->rh.wPortChange_2 set to 0x%x", hc->rh.wPortChange_2);
12663 -
12664 -       DBFEXIT;
12665 -
12666 -}
12667 -
12668 -void etrax_usb_hc_ctl_status_interrupt(usb_interrupt_registers_t *reg)
12669 -{
12670 -       DBFENTER;
12671 -
12672 -       /* FIXME: What should we do if we get ourun or perror? Dump the EP and SB
12673 -          list for the corresponding epid? */
12674 -       if (reg->r_usb_status & IO_MASK(R_USB_STATUS, ourun)) {
12675 -               panic("USB controller got ourun.");
12676 -       }
12677 -       if (reg->r_usb_status & IO_MASK(R_USB_STATUS, perror)) {
12678 -
12679 -               /* Before, etrax_usb_do_intr_recover was called on this epid if it was
12680 -                  an interrupt pipe. I don't see how re-enabling all EP descriptors
12681 -                  will help if there was a programming error. */
12682 -               panic("USB controller got perror.");
12683 -       }
12684 -
12685 -       if (reg->r_usb_status & IO_MASK(R_USB_STATUS, device_mode)) {
12686 -               /* We should never operate in device mode. */
12687 -               panic("USB controller in device mode.");
12688 -       }
12689 -
12690 -       /* These if-statements could probably be nested. */
12691 -       if (reg->r_usb_status & IO_MASK(R_USB_STATUS, host_mode)) {
12692 -               info("USB controller in host mode.");
12693 -       }
12694 -       if (reg->r_usb_status & IO_MASK(R_USB_STATUS, started)) {
12695 -               info("USB controller started.");
12696 -       }
12697 -       if (reg->r_usb_status & IO_MASK(R_USB_STATUS, running)) {
12698 -               info("USB controller running.");
12699 -       }
12700 -
12701 -       DBFEXIT;
12702 -
12703 -}
12704 -
12705 -
12706 -static int etrax_rh_submit_urb(struct urb *urb)
12707 -{
12708 -       struct usb_device *usb_dev = urb->dev;
12709 -       etrax_hc_t *hc = usb_dev->bus->hcpriv;
12710 -       unsigned int pipe = urb->pipe;
12711 -       struct usb_ctrlrequest *cmd = (struct usb_ctrlrequest *) urb->setup_packet;
12712 -       void *data = urb->transfer_buffer;
12713 -       int leni = urb->transfer_buffer_length;
12714 -       int len = 0;
12715 -       int stat = 0;
12716 -
12717 -       __u16 bmRType_bReq;
12718 -       __u16 wValue;
12719 -       __u16 wIndex;
12720 -       __u16 wLength;
12721 -
12722 -       DBFENTER;
12723 -
12724 -       /* FIXME: What is this interrupt urb that is sent to the root hub? */
12725 -       if (usb_pipetype (pipe) == PIPE_INTERRUPT) {
12726 -               dbg_rh("Root-Hub submit IRQ: every %d ms", urb->interval);
12727 -               hc->rh.urb = urb;
12728 -               hc->rh.send = 1;
12729 -               /* FIXME: We could probably remove this line since it's done
12730 -                  in etrax_rh_init_int_timer. (Don't remove it from
12731 -                  etrax_rh_init_int_timer though.) */
12732 -               hc->rh.interval = urb->interval;
12733 -               etrax_rh_init_int_timer(urb);
12734 -               DBFEXIT;
12735 -
12736 -               return 0;
12737 -       }
12738 -
12739 -       bmRType_bReq = cmd->bRequestType | (cmd->bRequest << 8);
12740 -       wValue = le16_to_cpu(cmd->wValue);
12741 -       wIndex = le16_to_cpu(cmd->wIndex);
12742 -       wLength = le16_to_cpu(cmd->wLength);
12743 -
12744 -       dbg_rh("bmRType_bReq : 0x%04x (%d)", bmRType_bReq, bmRType_bReq);
12745 -       dbg_rh("wValue       : 0x%04x (%d)", wValue, wValue);
12746 -       dbg_rh("wIndex       : 0x%04x (%d)", wIndex, wIndex);
12747 -       dbg_rh("wLength      : 0x%04x (%d)", wLength, wLength);
12748 -
12749 -       switch (bmRType_bReq) {
12750 -
12751 -               /* Request Destination:
12752 -                  without flags: Device,
12753 -                  RH_INTERFACE: interface,
12754 -                  RH_ENDPOINT: endpoint,
12755 -                  RH_CLASS means HUB here,
12756 -                  RH_OTHER | RH_CLASS  almost ever means HUB_PORT here
12757 -                */
12758 -
12759 -       case RH_GET_STATUS:
12760 -               *(__u16 *) data = cpu_to_le16 (1);
12761 -               OK (2);
12762 -
12763 -       case RH_GET_STATUS | RH_INTERFACE:
12764 -               *(__u16 *) data = cpu_to_le16 (0);
12765 -               OK (2);
12766 -
12767 -       case RH_GET_STATUS | RH_ENDPOINT:
12768 -               *(__u16 *) data = cpu_to_le16 (0);
12769 -               OK (2);
12770 -
12771 -       case RH_GET_STATUS | RH_CLASS:
12772 -               *(__u32 *) data = cpu_to_le32 (0);
12773 -               OK (4);         /* hub power ** */
12774 -
12775 -       case RH_GET_STATUS | RH_OTHER | RH_CLASS:
12776 -               if (wIndex == 1) {
12777 -                       *((__u16*)data) = cpu_to_le16(hc->rh.prev_wPortStatus_1);
12778 -                       *((__u16*)data + 1) = cpu_to_le16(hc->rh.wPortChange_1);
12779 -               } else if (wIndex == 2) {
12780 -                       *((__u16*)data) = cpu_to_le16(hc->rh.prev_wPortStatus_2);
12781 -                       *((__u16*)data + 1) = cpu_to_le16(hc->rh.wPortChange_2);
12782 -               } else {
12783 -                       dbg_rh("RH_GET_STATUS whith invalid wIndex!");
12784 -                       OK(0);
12785 -               }
12786 -
12787 -               OK(4);
12788 -
12789 -       case RH_CLEAR_FEATURE | RH_ENDPOINT:
12790 -               switch (wValue) {
12791 -               case (RH_ENDPOINT_STALL):
12792 -                       OK (0);
12793 -               }
12794 -               break;
12795 -
12796 -       case RH_CLEAR_FEATURE | RH_CLASS:
12797 -               switch (wValue) {
12798 -               case (RH_C_HUB_OVER_CURRENT):
12799 -                       OK (0); /* hub power over current ** */
12800 -               }
12801 -               break;
12802 -
12803 -       case RH_CLEAR_FEATURE | RH_OTHER | RH_CLASS:
12804 -               switch (wValue) {
12805 -               case (RH_PORT_ENABLE):
12806 -                       if (wIndex == 1) {
12807 -
12808 -                               dbg_rh("trying to do disable port 1");
12809 -
12810 -                               *R_USB_PORT1_DISABLE = IO_STATE(R_USB_PORT1_DISABLE, disable, yes);
12811 -
12812 -                               while (hc->rh.prev_wPortStatus_1 &
12813 -                                      IO_STATE(R_USB_RH_PORT_STATUS_1, enabled, yes));
12814 -                               *R_USB_PORT1_DISABLE = IO_STATE(R_USB_PORT1_DISABLE, disable, no);
12815 -                               dbg_rh("Port 1 is disabled");
12816 -
12817 -                       } else if (wIndex == 2) {
12818 -
12819 -                               dbg_rh("trying to do disable port 2");
12820 -
12821 -                               *R_USB_PORT2_DISABLE = IO_STATE(R_USB_PORT2_DISABLE, disable, yes);
12822 -
12823 -                               while (hc->rh.prev_wPortStatus_2 &
12824 -                                      IO_STATE(R_USB_RH_PORT_STATUS_2, enabled, yes));
12825 -                               *R_USB_PORT2_DISABLE = IO_STATE(R_USB_PORT2_DISABLE, disable, no);
12826 -                               dbg_rh("Port 2 is disabled");
12827 -
12828 -                       } else {
12829 -                               dbg_rh("RH_CLEAR_FEATURE->RH_PORT_ENABLE "
12830 -                                      "with invalid wIndex == %d!", wIndex);
12831 -                       }
12832 -
12833 -                       OK (0);
12834 -               case (RH_PORT_SUSPEND):
12835 -                       /* Opposite to suspend should be resume, so we'll do a resume. */
12836 -                       /* FIXME: USB 1.1, 11.16.2.2 says:
12837 -                          "Clearing the PORT_SUSPEND feature causes a host-initiated resume
12838 -                          on the specified port. If the port is not in the Suspended state,
12839 -                          the hub should treat this request as a functional no-operation."
12840 -                          Shouldn't we check if the port is in a suspended state before
12841 -                          resuming? */
12842 -
12843 -                       /* Make sure the controller isn't busy. */
12844 -                       while (*R_USB_COMMAND & IO_MASK(R_USB_COMMAND, busy));
12845 -
12846 -                       if (wIndex == 1) {
12847 -                               *R_USB_COMMAND =
12848 -                                       IO_STATE(R_USB_COMMAND, port_sel, port1) |
12849 -                                       IO_STATE(R_USB_COMMAND, port_cmd, resume) |
12850 -                                       IO_STATE(R_USB_COMMAND, ctrl_cmd, nop);
12851 -                       } else if (wIndex == 2) {
12852 -                               *R_USB_COMMAND =
12853 -                                       IO_STATE(R_USB_COMMAND, port_sel, port2) |
12854 -                                       IO_STATE(R_USB_COMMAND, port_cmd, resume) |
12855 -                                       IO_STATE(R_USB_COMMAND, ctrl_cmd, nop);
12856 -                       } else {
12857 -                               dbg_rh("RH_CLEAR_FEATURE->RH_PORT_SUSPEND "
12858 -                                      "with invalid wIndex == %d!", wIndex);
12859 -                       }
12860 -
12861 -                       OK (0);
12862 -               case (RH_PORT_POWER):
12863 -                       OK (0); /* port power ** */
12864 -               case (RH_C_PORT_CONNECTION):
12865 -                       if (wIndex == 1) {
12866 -                               hc->rh.wPortChange_1 &= ~(1 << RH_PORT_CONNECTION);
12867 -                       } else if (wIndex == 2) {
12868 -                               hc->rh.wPortChange_2 &= ~(1 << RH_PORT_CONNECTION);
12869 -                       } else {
12870 -                               dbg_rh("RH_CLEAR_FEATURE->RH_C_PORT_CONNECTION "
12871 -                                      "with invalid wIndex == %d!", wIndex);
12872 -                       }
12873 -
12874 -                       OK (0);
12875 -               case (RH_C_PORT_ENABLE):
12876 -                       if (wIndex == 1) {
12877 -                               hc->rh.wPortChange_1 &= ~(1 << RH_PORT_ENABLE);
12878 -                       } else if (wIndex == 2) {
12879 -                               hc->rh.wPortChange_2 &= ~(1 << RH_PORT_ENABLE);
12880 -                       } else {
12881 -                               dbg_rh("RH_CLEAR_FEATURE->RH_C_PORT_ENABLE "
12882 -                                      "with invalid wIndex == %d!", wIndex);
12883 -                       }
12884 -                       OK (0);
12885 -               case (RH_C_PORT_SUSPEND):
12886 -/*** WR_RH_PORTSTAT(RH_PS_PSSC); */
12887 -                       OK (0);
12888 -               case (RH_C_PORT_OVER_CURRENT):
12889 -                       OK (0); /* port power over current ** */
12890 -               case (RH_C_PORT_RESET):
12891 -                       if (wIndex == 1) {
12892 -                               hc->rh.wPortChange_1 &= ~(1 << RH_PORT_RESET);
12893 -                       } else if (wIndex == 2) {
12894 -                               hc->rh.wPortChange_2 &= ~(1 << RH_PORT_RESET);
12895 -                       } else {
12896 -                               dbg_rh("RH_CLEAR_FEATURE->RH_C_PORT_RESET "
12897 -                                      "with invalid index == %d!", wIndex);
12898 -                       }
12899 -
12900 -                       OK (0);
12901 -
12902 -               }
12903 -               break;
12904 -
12905 -       case RH_SET_FEATURE | RH_OTHER | RH_CLASS:
12906 -               switch (wValue) {
12907 -               case (RH_PORT_SUSPEND):
12908 -
12909 -                       /* Make sure the controller isn't busy. */
12910 -                       while (*R_USB_COMMAND & IO_MASK(R_USB_COMMAND, busy));
12911 -
12912 -                       if (wIndex == 1) {
12913 -                               *R_USB_COMMAND =
12914 -                                       IO_STATE(R_USB_COMMAND, port_sel, port1) |
12915 -                                       IO_STATE(R_USB_COMMAND, port_cmd, suspend) |
12916 -                                       IO_STATE(R_USB_COMMAND, ctrl_cmd, nop);
12917 -                       } else if (wIndex == 2) {
12918 -                               *R_USB_COMMAND =
12919 -                                       IO_STATE(R_USB_COMMAND, port_sel, port2) |
12920 -                                       IO_STATE(R_USB_COMMAND, port_cmd, suspend) |
12921 -                                       IO_STATE(R_USB_COMMAND, ctrl_cmd, nop);
12922 -                       } else {
12923 -                               dbg_rh("RH_SET_FEATURE->RH_PORT_SUSPEND "
12924 -                                      "with invalid wIndex == %d!", wIndex);
12925 -                       }
12926 -
12927 -                       OK (0);
12928 -               case (RH_PORT_RESET):
12929 -                       if (wIndex == 1) {
12930 -
12931 -                       port_1_reset:
12932 -                               dbg_rh("Doing reset of port 1");
12933 -
12934 -                               /* Make sure the controller isn't busy. */
12935 -                               while (*R_USB_COMMAND & IO_MASK(R_USB_COMMAND, busy));
12936 -
12937 -                               *R_USB_COMMAND =
12938 -                                       IO_STATE(R_USB_COMMAND, port_sel, port1) |
12939 -                                       IO_STATE(R_USB_COMMAND, port_cmd, reset) |
12940 -                                       IO_STATE(R_USB_COMMAND, ctrl_cmd, nop);
12941 -
12942 -                               /* We must wait at least 10 ms for the device to recover.
12943 -                                  15 ms should be enough. */
12944 -                               udelay(15000);
12945 -
12946 -                               /* Wait for reset bit to go low (should be done by now). */
12947 -                               while (hc->rh.prev_wPortStatus_1 &
12948 -                                      IO_STATE(R_USB_RH_PORT_STATUS_1, reset, yes));
12949 -
12950 -                               /* If the port status is
12951 -                                  1) connected and enabled then there is a device and everything is fine
12952 -                                  2) neither connected nor enabled then there is no device, also fine
12953 -                                  3) connected and not enabled then we try again
12954 -                                  (Yes, there are other port status combinations besides these.) */
12955 -
12956 -                               if ((hc->rh.prev_wPortStatus_1 &
12957 -                                    IO_STATE(R_USB_RH_PORT_STATUS_1, connected, yes)) &&
12958 -                                   (hc->rh.prev_wPortStatus_1 &
12959 -                                    IO_STATE(R_USB_RH_PORT_STATUS_1, enabled, no))) {
12960 -                                       dbg_rh("Connected device on port 1, but port not enabled?"
12961 -                                              " Trying reset again.");
12962 -                                       goto port_2_reset;
12963 -                               }
12964 -
12965 -                               /* Diagnostic printouts. */
12966 -                               if ((hc->rh.prev_wPortStatus_1 &
12967 -                                    IO_STATE(R_USB_RH_PORT_STATUS_1, connected, no)) &&
12968 -                                   (hc->rh.prev_wPortStatus_1 &
12969 -                                    IO_STATE(R_USB_RH_PORT_STATUS_1, enabled, no))) {
12970 -                                       dbg_rh("No connected device on port 1");
12971 -                               } else if ((hc->rh.prev_wPortStatus_1 &
12972 -                                           IO_STATE(R_USB_RH_PORT_STATUS_1, connected, yes)) &&
12973 -                                          (hc->rh.prev_wPortStatus_1 &
12974 -                                           IO_STATE(R_USB_RH_PORT_STATUS_1, enabled, yes))) {
12975 -                                       dbg_rh("Connected device on port 1, port 1 enabled");
12976 -                               }
12977 -
12978 -                       } else if (wIndex == 2) {
12979 -
12980 -                       port_2_reset:
12981 -                               dbg_rh("Doing reset of port 2");
12982 -
12983 -                               /* Make sure the controller isn't busy. */
12984 -                               while (*R_USB_COMMAND & IO_MASK(R_USB_COMMAND, busy));
12985 -
12986 -                               /* Issue the reset command. */
12987 -                               *R_USB_COMMAND =
12988 -                                       IO_STATE(R_USB_COMMAND, port_sel, port2) |
12989 -                                       IO_STATE(R_USB_COMMAND, port_cmd, reset) |
12990 -                                       IO_STATE(R_USB_COMMAND, ctrl_cmd, nop);
12991 -
12992 -                               /* We must wait at least 10 ms for the device to recover.
12993 -                                  15 ms should be enough. */
12994 -                               udelay(15000);
12995 -
12996 -                               /* Wait for reset bit to go low (should be done by now). */
12997 -                               while (hc->rh.prev_wPortStatus_2 &
12998 -                                      IO_STATE(R_USB_RH_PORT_STATUS_2, reset, yes));
12999 -
13000 -                               /* If the port status is
13001 -                                  1) connected and enabled then there is a device and everything is fine
13002 -                                  2) neither connected nor enabled then there is no device, also fine
13003 -                                  3) connected and not enabled then we try again
13004 -                                  (Yes, there are other port status combinations besides these.) */
13005 -
13006 -                               if ((hc->rh.prev_wPortStatus_2 &
13007 -                                    IO_STATE(R_USB_RH_PORT_STATUS_2, connected, yes)) &&
13008 -                                   (hc->rh.prev_wPortStatus_2 &
13009 -                                    IO_STATE(R_USB_RH_PORT_STATUS_2, enabled, no))) {
13010 -                                       dbg_rh("Connected device on port 2, but port not enabled?"
13011 -                                              " Trying reset again.");
13012 -                                       goto port_2_reset;
13013 -                               }
13014 -
13015 -                               /* Diagnostic printouts. */
13016 -                               if ((hc->rh.prev_wPortStatus_2 &
13017 -                                    IO_STATE(R_USB_RH_PORT_STATUS_2, connected, no)) &&
13018 -                                   (hc->rh.prev_wPortStatus_2 &
13019 -                                    IO_STATE(R_USB_RH_PORT_STATUS_2, enabled, no))) {
13020 -                                       dbg_rh("No connected device on port 2");
13021 -                               } else if ((hc->rh.prev_wPortStatus_2 &
13022 -                                           IO_STATE(R_USB_RH_PORT_STATUS_2, connected, yes)) &&
13023 -                                          (hc->rh.prev_wPortStatus_2 &
13024 -                                           IO_STATE(R_USB_RH_PORT_STATUS_2, enabled, yes))) {
13025 -                                       dbg_rh("Connected device on port 2, port 2 enabled");
13026 -                               }
13027 -
13028 -                       } else {
13029 -                               dbg_rh("RH_SET_FEATURE->RH_PORT_RESET with invalid wIndex = %d", wIndex);
13030 -                       }
13031 -
13032 -                       /* Make sure the controller isn't busy. */
13033 -                       while (*R_USB_COMMAND & IO_MASK(R_USB_COMMAND, busy));
13034 -
13035 -                       /* If all enabled ports were disabled the host controller goes down into
13036 -                          started mode, so we need to bring it back into the running state.
13037 -                          (This is safe even if it's already in the running state.) */
13038 -                       *R_USB_COMMAND =
13039 -                               IO_STATE(R_USB_COMMAND, port_sel, nop) |
13040 -                               IO_STATE(R_USB_COMMAND, port_cmd, reset) |
13041 -                               IO_STATE(R_USB_COMMAND, ctrl_cmd, host_run);
13042 -
13043 -                       dbg_rh("...Done");
13044 -                       OK(0);
13045 -
13046 -               case (RH_PORT_POWER):
13047 -                       OK (0); /* port power ** */
13048 -               case (RH_PORT_ENABLE):
13049 -                       /* There is no port enable command in the host controller, so if the
13050 -                          port is already enabled, we do nothing. If not, we reset the port
13051 -                          (with an ugly goto). */
13052 -
13053 -                       if (wIndex == 1) {
13054 -                               if (hc->rh.prev_wPortStatus_1 &
13055 -                                   IO_STATE(R_USB_RH_PORT_STATUS_1, enabled, no)) {
13056 -                                       goto port_1_reset;
13057 -                               }
13058 -                       } else if (wIndex == 2) {
13059 -                               if (hc->rh.prev_wPortStatus_2 &
13060 -                                   IO_STATE(R_USB_RH_PORT_STATUS_2, enabled, no)) {
13061 -                                       goto port_2_reset;
13062 -                               }
13063 -                       } else {
13064 -                               dbg_rh("RH_SET_FEATURE->RH_GET_STATUS with invalid wIndex = %d", wIndex);
13065 -                       }
13066 -                       OK (0);
13067 -               }
13068 -               break;
13069 -
13070 -       case RH_SET_ADDRESS:
13071 -               hc->rh.devnum = wValue;
13072 -               dbg_rh("RH address set to: %d", hc->rh.devnum);
13073 -               OK (0);
13074 -
13075 -       case RH_GET_DESCRIPTOR:
13076 -               switch ((wValue & 0xff00) >> 8) {
13077 -               case (0x01):    /* device descriptor */
13078 -                       len = min_t(unsigned int, leni, min_t(unsigned int, sizeof (root_hub_dev_des), wLength));
13079 -                       memcpy (data, root_hub_dev_des, len);
13080 -                       OK (len);
13081 -               case (0x02):    /* configuration descriptor */
13082 -                       len = min_t(unsigned int, leni, min_t(unsigned int, sizeof (root_hub_config_des), wLength));
13083 -                       memcpy (data, root_hub_config_des, len);
13084 -                       OK (len);
13085 -               case (0x03):    /* string descriptors */
13086 -                       len = usb_root_hub_string (wValue & 0xff,
13087 -                                                  0xff, "ETRAX 100LX",
13088 -                                                  data, wLength);
13089 -                       if (len > 0) {
13090 -                               OK(min(leni, len));
13091 -                       } else {
13092 -                               stat = -EPIPE;
13093 -                       }
13094 -
13095 -               }
13096 -               break;
13097 -
13098 -       case RH_GET_DESCRIPTOR | RH_CLASS:
13099 -               root_hub_hub_des[2] = hc->rh.numports;
13100 -               len = min_t(unsigned int, leni, min_t(unsigned int, sizeof (root_hub_hub_des), wLength));
13101 -               memcpy (data, root_hub_hub_des, len);
13102 -               OK (len);
13103 -
13104 -       case RH_GET_CONFIGURATION:
13105 -               *(__u8 *) data = 0x01;
13106 -               OK (1);
13107 -
13108 -       case RH_SET_CONFIGURATION:
13109 -               OK (0);
13110 -
13111 -       default:
13112 -               stat = -EPIPE;
13113 -       }
13114 -
13115 -       urb->actual_length = len;
13116 -       urb->status = stat;
13117 -       urb->dev = NULL;
13118 -       if (urb->complete) {
13119 -               urb->complete(urb, NULL);
13120 -       }
13121 -       DBFEXIT;
13122 -
13123 -       return 0;
13124 -}
13125 -
13126 -static void
13127 -etrax_usb_bulk_eot_timer_func(unsigned long dummy)
13128 -{
13129 -       /* Because of a race condition in the top half, we might miss a bulk eot.
13130 -          This timer "simulates" a bulk eot if we don't get one for a while, hopefully
13131 -          correcting the situation. */
13132 -       dbg_bulk("bulk_eot_timer timed out.");
13133 -       etrax_usb_hc_bulk_eot_interrupt(1);
13134 -}
13135 -
13136 -static void*
13137 -etrax_usb_buffer_alloc(struct usb_bus* bus, size_t size,
13138 -       unsigned mem_flags, dma_addr_t *dma)
13139 -{
13140 -  return kmalloc(size, mem_flags);
13141 -}
13142 -
13143 -static void
13144 -etrax_usb_buffer_free(struct usb_bus *bus, size_t size, void *addr, dma_addr_t dma)
13145 -{
13146 -  kfree(addr);
13147 -}
13148 -
13149 -
13150 -static struct device fake_device;
13151 -
13152 -static int __init etrax_usb_hc_init(void)
13153 -{
13154 -       static etrax_hc_t *hc;
13155 -       struct usb_bus *bus;
13156 -       struct usb_device *usb_rh;
13157 -       int i;
13158 -
13159 -       DBFENTER;
13160 -
13161 -       info("ETRAX 100LX USB-HCD %s (c) 2001-2003 Axis Communications AB\n", usb_hcd_version);
13162 -
13163 -       hc = kmalloc(sizeof(etrax_hc_t), GFP_KERNEL);
13164 -       assert(hc != NULL);
13165 -
13166 -       /* We use kmem_cache_* to make sure that all DMA desc. are dword aligned */
13167 -       /* Note that we specify sizeof(USB_EP_Desc_t) as the size, but also allocate
13168 -          SB descriptors from this cache. This is ok since sizeof(USB_EP_Desc_t) ==
13169 -          sizeof(USB_SB_Desc_t). */
13170 -
13171 -       usb_desc_cache = kmem_cache_create("usb_desc_cache", sizeof(USB_EP_Desc_t), 0,
13172 -                                          SLAB_HWCACHE_ALIGN, 0, 0);
13173 -       assert(usb_desc_cache != NULL);
13174 -
13175 -       top_half_reg_cache = kmem_cache_create("top_half_reg_cache",
13176 -                                              sizeof(usb_interrupt_registers_t),
13177 -                                              0, SLAB_HWCACHE_ALIGN, 0, 0);
13178 -       assert(top_half_reg_cache != NULL);
13179 -
13180 -       isoc_compl_cache = kmem_cache_create("isoc_compl_cache",
13181 -                                               sizeof(usb_isoc_complete_data_t),
13182 -                                               0, SLAB_HWCACHE_ALIGN, 0, 0);
13183 -       assert(isoc_compl_cache != NULL);
13184 -
13185 -       etrax_usb_bus = bus = usb_alloc_bus(&etrax_usb_device_operations);
13186 -       hc->bus = bus;
13187 -       bus->bus_name="ETRAX 100LX";
13188 -       bus->hcpriv = hc;
13189 -
13190 -       /* Initialize RH to the default address.
13191 -          And make sure that we have no status change indication */
13192 -       hc->rh.numports = 2;  /* The RH has two ports */
13193 -       hc->rh.devnum = 1;
13194 -       hc->rh.wPortChange_1 = 0;
13195 -       hc->rh.wPortChange_2 = 0;
13196 -
13197 -       /* Also initate the previous values to zero */
13198 -       hc->rh.prev_wPortStatus_1 = 0;
13199 -       hc->rh.prev_wPortStatus_2 = 0;
13200 -
13201 -       /* Initialize the intr-traffic flags */
13202 -       /* FIXME: This isn't used. (Besides, the error field isn't initialized.) */
13203 -       hc->intr.sleeping = 0;
13204 -       hc->intr.wq = NULL;
13205 -
13206 -       epid_usage_bitmask = 0;
13207 -       epid_out_traffic = 0;
13208 -
13209 -       /* Mark the invalid epid as being used. */
13210 -       set_bit(INVALID_EPID, (void *)&epid_usage_bitmask);
13211 -       *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, INVALID_EPID);
13212 -       nop();
13213 -       /* The valid bit should still be set ('invalid' is in our world; not the hardware's). */
13214 -       *R_USB_EPT_DATA = (IO_STATE(R_USB_EPT_DATA, valid, yes) |
13215 -                          IO_FIELD(R_USB_EPT_DATA, max_len, 1));
13216 -
13217 -       /* Mark the dummy epid as being used. */
13218 -       set_bit(DUMMY_EPID, (void *)&epid_usage_bitmask);
13219 -       *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, DUMMY_EPID);
13220 -       nop();
13221 -       *R_USB_EPT_DATA = (IO_STATE(R_USB_EPT_DATA, valid, no) |
13222 -                          IO_FIELD(R_USB_EPT_DATA, max_len, 1));
13223 -
13224 -       /* Initialize the urb list by initiating a head for each list. */
13225 -       for (i = 0; i < NBR_OF_EPIDS; i++) {
13226 -               INIT_LIST_HEAD(&urb_list[i]);
13227 -       }
13228 -       spin_lock_init(&urb_list_lock);
13229 -
13230 -       INIT_LIST_HEAD(&urb_unlink_list);
13231 -
13232 -
13233 -       /* Initiate the bulk start timer. */
13234 -       init_timer(&bulk_start_timer);
13235 -       bulk_start_timer.expires = jiffies + BULK_START_TIMER_INTERVAL;
13236 -       bulk_start_timer.function = etrax_usb_bulk_start_timer_func;
13237 -       add_timer(&bulk_start_timer);
13238 -
13239 -
13240 -       /* Initiate the bulk eot timer. */
13241 -       init_timer(&bulk_eot_timer);
13242 -       bulk_eot_timer.expires = jiffies + BULK_EOT_TIMER_INTERVAL;
13243 -       bulk_eot_timer.function = etrax_usb_bulk_eot_timer_func;
13244 -       add_timer(&bulk_eot_timer);
13245 -
13246 -       /* Set up the data structures for USB traffic. Note that this must be done before
13247 -          any interrupt that relies on sane DMA list occurrs. */
13248 -       init_rx_buffers();
13249 -       init_tx_bulk_ep();
13250 -       init_tx_ctrl_ep();
13251 -       init_tx_intr_ep();
13252 -       init_tx_isoc_ep();
13253 -
13254 -        device_initialize(&fake_device);
13255 -        kobject_set_name(&fake_device.kobj, "etrax_usb");
13256 -        kobject_add(&fake_device.kobj);
13257 -       kobject_uevent(&fake_device.kobj, KOBJ_ADD);
13258 -        hc->bus->controller = &fake_device;
13259 -       usb_register_bus(hc->bus);
13260 -
13261 -       *R_IRQ_MASK2_SET =
13262 -               /* Note that these interrupts are not used. */
13263 -               IO_STATE(R_IRQ_MASK2_SET, dma8_sub0_descr, set) |
13264 -               /* Sub channel 1 (ctrl) descr. interrupts are used. */
13265 -               IO_STATE(R_IRQ_MASK2_SET, dma8_sub1_descr, set) |
13266 -               IO_STATE(R_IRQ_MASK2_SET, dma8_sub2_descr, set) |
13267 -               /* Sub channel 3 (isoc) descr. interrupts are used. */
13268 -               IO_STATE(R_IRQ_MASK2_SET, dma8_sub3_descr, set);
13269 -
13270 -       /* Note that the dma9_descr interrupt is not used. */
13271 -       *R_IRQ_MASK2_SET =
13272 -               IO_STATE(R_IRQ_MASK2_SET, dma9_eop, set) |
13273 -               IO_STATE(R_IRQ_MASK2_SET, dma9_descr, set);
13274 -
13275 -       /* FIXME: Enable iso_eof only when isoc traffic is running. */
13276 -       *R_USB_IRQ_MASK_SET =
13277 -               IO_STATE(R_USB_IRQ_MASK_SET, iso_eof, set) |
13278 -               IO_STATE(R_USB_IRQ_MASK_SET, bulk_eot, set) |
13279 -               IO_STATE(R_USB_IRQ_MASK_SET, epid_attn, set) |
13280 -               IO_STATE(R_USB_IRQ_MASK_SET, port_status, set) |
13281 -               IO_STATE(R_USB_IRQ_MASK_SET, ctl_status, set);
13282 -
13283 -
13284 -       if (request_irq(ETRAX_USB_HC_IRQ, etrax_usb_hc_interrupt_top_half, 0,
13285 -                       "ETRAX 100LX built-in USB (HC)", hc)) {
13286 -               err("Could not allocate IRQ %d for USB", ETRAX_USB_HC_IRQ);
13287 -               etrax_usb_hc_cleanup();
13288 -               DBFEXIT;
13289 -               return -1;
13290 -       }
13291 -
13292 -       if (request_irq(ETRAX_USB_RX_IRQ, etrax_usb_rx_interrupt, 0,
13293 -                       "ETRAX 100LX built-in USB (Rx)", hc)) {
13294 -               err("Could not allocate IRQ %d for USB", ETRAX_USB_RX_IRQ);
13295 -               etrax_usb_hc_cleanup();
13296 -               DBFEXIT;
13297 -               return -1;
13298 -       }
13299 -
13300 -       if (request_irq(ETRAX_USB_TX_IRQ, etrax_usb_tx_interrupt, 0,
13301 -                       "ETRAX 100LX built-in USB (Tx)", hc)) {
13302 -               err("Could not allocate IRQ %d for USB", ETRAX_USB_TX_IRQ);
13303 -               etrax_usb_hc_cleanup();
13304 -               DBFEXIT;
13305 -               return -1;
13306 -       }
13307 -
13308 -       /* R_USB_COMMAND:
13309 -          USB commands in host mode. The fields in this register should all be
13310 -          written to in one write. Do not read-modify-write one field at a time. A
13311 -          write to this register will trigger events in the USB controller and an
13312 -          incomplete command may lead to unpredictable results, and in worst case
13313 -          even to a deadlock in the controller.
13314 -          (Note however that the busy field is read-only, so no need to write to it.) */
13315 -
13316 -       /* Check the busy bit before writing to R_USB_COMMAND. */
13317 -
13318 -       while (*R_USB_COMMAND & IO_MASK(R_USB_COMMAND, busy));
13319 -
13320 -       /* Reset the USB interface. */
13321 -       *R_USB_COMMAND =
13322 -               IO_STATE(R_USB_COMMAND, port_sel, nop) |
13323 -               IO_STATE(R_USB_COMMAND, port_cmd, reset) |
13324 -               IO_STATE(R_USB_COMMAND, ctrl_cmd, reset);
13325 -
13326 -       /* Designer's Reference, p. 8 - 10 says we should Initate R_USB_FM_PSTART to 0x2A30 (10800),
13327 -          to guarantee that control traffic gets 10% of the bandwidth, and periodic transfer may
13328 -          allocate the rest (90%). This doesn't work though. Read on for a lenghty explanation.
13329 -
13330 -          While there is a difference between rev. 2 and rev. 3 of the ETRAX 100LX regarding the NAK
13331 -          behaviour, it doesn't solve this problem. What happens is that a control transfer will not
13332 -          be interrupted in its data stage when PSTART happens (the point at which periodic traffic
13333 -          is started). Thus, if PSTART is set to 10800 and its IN or OUT token is NAKed until just before
13334 -          PSTART happens, it will continue the IN/OUT transfer as long as it's ACKed. After it's done,
13335 -          there may be too little time left for an isochronous transfer, causing an epid attention
13336 -          interrupt due to perror. The work-around for this is to let the control transfers run at the
13337 -          end of the frame instead of at the beginning, and will be interrupted just fine if it doesn't
13338 -          fit into the frame. However, since there will *always* be a control transfer at the beginning
13339 -          of the frame, regardless of what we set PSTART to, that transfer might be a 64-byte transfer
13340 -          which consumes up to 15% of the frame, leaving only 85% for periodic traffic. The solution to
13341 -          this would be to 'dummy allocate' 5% of the frame with the usb_claim_bandwidth function to make
13342 -          sure that the periodic transfers that are inserted will always fit in the frame.
13343 -
13344 -          The idea was suggested that a control transfer could be split up into several 8 byte transfers,
13345 -          so that it would be interrupted by PSTART, but since this can't be done for an IN transfer this
13346 -          hasn't been implemented.
13347 -
13348 -          The value 11960 is chosen to be just after the SOF token, with a couple of bit times extra
13349 -          for possible bit stuffing. */
13350 -
13351 -       *R_USB_FM_PSTART = IO_FIELD(R_USB_FM_PSTART, value, 11960);
13352 -
13353 -#ifdef CONFIG_ETRAX_USB_HOST_PORT1
13354 -       *R_USB_PORT1_DISABLE = IO_STATE(R_USB_PORT1_DISABLE, disable, no);
13355 -#endif
13356 -
13357 -#ifdef CONFIG_ETRAX_USB_HOST_PORT2
13358 -       *R_USB_PORT2_DISABLE = IO_STATE(R_USB_PORT2_DISABLE, disable, no);
13359 -#endif
13360 -
13361 -       while (*R_USB_COMMAND & IO_MASK(R_USB_COMMAND, busy));
13362 -
13363 -       /* Configure the USB interface as a host controller. */
13364 -       *R_USB_COMMAND =
13365 -               IO_STATE(R_USB_COMMAND, port_sel, nop) |
13366 -               IO_STATE(R_USB_COMMAND, port_cmd, reset) |
13367 -               IO_STATE(R_USB_COMMAND, ctrl_cmd, host_config);
13368 -
13369 -       /* Note: Do not reset any ports here. Await the port status interrupts, to have a controlled
13370 -          sequence of resetting the ports. If we reset both ports now, and there are devices
13371 -          on both ports, we will get a bus error because both devices will answer the set address
13372 -          request. */
13373 -
13374 -       while (*R_USB_COMMAND & IO_MASK(R_USB_COMMAND, busy));
13375 -
13376 -       /* Start processing of USB traffic. */
13377 -       *R_USB_COMMAND =
13378 -               IO_STATE(R_USB_COMMAND, port_sel, nop) |
13379 -               IO_STATE(R_USB_COMMAND, port_cmd, reset) |
13380 -               IO_STATE(R_USB_COMMAND, ctrl_cmd, host_run);
13381 -
13382 -       while (*R_USB_COMMAND & IO_MASK(R_USB_COMMAND, busy));
13383 -
13384 -       usb_rh = usb_alloc_dev(NULL, hc->bus, 0);
13385 -       hc->bus->root_hub = usb_rh;
13386 -        usb_rh->state = USB_STATE_ADDRESS;
13387 -        usb_rh->speed = USB_SPEED_FULL;
13388 -        usb_rh->devnum = 1;
13389 -        hc->bus->devnum_next = 2;
13390 -        usb_rh->ep0.desc.wMaxPacketSize = __const_cpu_to_le16(64);
13391 -        usb_get_device_descriptor(usb_rh, USB_DT_DEVICE_SIZE);
13392 -       usb_new_device(usb_rh);
13393 -
13394 -       DBFEXIT;
13395 -
13396 -       return 0;
13397 -}
13398 -
13399 -static void etrax_usb_hc_cleanup(void)
13400 -{
13401 -       DBFENTER;
13402 -
13403 -       free_irq(ETRAX_USB_HC_IRQ, NULL);
13404 -       free_irq(ETRAX_USB_RX_IRQ, NULL);
13405 -       free_irq(ETRAX_USB_TX_IRQ, NULL);
13406 -
13407 -       usb_deregister_bus(etrax_usb_bus);
13408 -
13409 -       /* FIXME: call kmem_cache_destroy here? */
13410 -
13411 -       DBFEXIT;
13412 -}
13413  
13414 -module_init(etrax_usb_hc_init);
13415 -module_exit(etrax_usb_hc_cleanup);
13416 +/* Module hooks */
13417 +module_init(module_hcd_init);
13418 +module_exit(module_hcd_exit);
13419 --- linux-2.6.19.2.orig/drivers/usb/host/hc-crisv10.c   1970-01-01 01:00:00.000000000 +0100
13420 +++ linux-2.6.19.2.dev/drivers/usb/host/hc-crisv10.c    2007-02-26 20:58:29.000000000 +0100
13421 @@ -0,0 +1,4684 @@
13422 +/*
13423 + *
13424 + * ETRAX 100LX USB Host Controller Driver
13425 + *
13426 + * Copyright (C) 2005, 2006  Axis Communications AB
13427 + *
13428 + * Author: Konrad Eriksson <konrad.eriksson@axis.se>
13429 + *
13430 + */
13431 +
13432 +#include <linux/module.h>
13433 +#include <linux/kernel.h>
13434 +#include <linux/init.h>
13435 +#include <linux/moduleparam.h>
13436 +#include <linux/spinlock.h>
13437 +#include <linux/usb.h>
13438 +#include <linux/platform_device.h>
13439 +
13440 +#include <asm/io.h>
13441 +#include <asm/irq.h>
13442 +#include <asm/arch/dma.h>
13443 +#include <asm/arch/io_interface_mux.h>
13444 +
13445 +#include "../core/hcd.h"
13446 +#include "../core/hub.h"
13447 +#include "hc-crisv10.h"
13448 +#include "hc-cris-dbg.h"
13449 +
13450 +
13451 +/***************************************************************************/
13452 +/***************************************************************************/
13453 +/* Host Controller settings                                                */
13454 +/***************************************************************************/
13455 +/***************************************************************************/
13456 +
13457 +#define VERSION                        "1.00"
13458 +#define COPYRIGHT              "(c) 2005, 2006 Axis Communications AB"
13459 +#define DESCRIPTION             "ETRAX 100LX USB Host Controller"
13460 +
13461 +#define ETRAX_USB_HC_IRQ USB_HC_IRQ_NBR
13462 +#define ETRAX_USB_RX_IRQ USB_DMA_RX_IRQ_NBR
13463 +#define ETRAX_USB_TX_IRQ USB_DMA_TX_IRQ_NBR
13464 +
13465 +/* Number of physical ports in Etrax 100LX */
13466 +#define USB_ROOT_HUB_PORTS 2
13467 +
13468 +const char hc_name[] = "hc-crisv10";
13469 +const char product_desc[] = DESCRIPTION;
13470 +
13471 +/* The number of epids is, among other things, used for pre-allocating
13472 +   ctrl, bulk and isoc EP descriptors (one for each epid).
13473 +   Assumed to be > 1 when initiating the DMA lists. */
13474 +#define NBR_OF_EPIDS       32
13475 +
13476 +/* Support interrupt traffic intervals up to 128 ms. */
13477 +#define MAX_INTR_INTERVAL  128
13478 +
13479 +/* If periodic traffic (intr or isoc) is to be used, then one entry in the EP
13480 +   table must be "invalid". By this we mean that we shouldn't care about epid
13481 +   attentions for this epid, or at least handle them differently from epid
13482 +   attentions for "valid" epids. This define determines which one to use
13483 +   (don't change it). */
13484 +#define INVALID_EPID       31
13485 +/* A special epid for the bulk dummies. */
13486 +#define DUMMY_EPID         30
13487 +
13488 +/* Module settings */
13489 +
13490 +MODULE_DESCRIPTION(DESCRIPTION);
13491 +MODULE_LICENSE("GPL");
13492 +MODULE_AUTHOR("Konrad Eriksson <konrad.eriksson@axis.se>");
13493 +
13494 +
13495 +/* Module parameters */
13496 +
13497 +/* 0 = No ports enabled
13498 +   1 = Only port 1 enabled (on board ethernet on devboard)
13499 +   2 = Only port 2 enabled (external connector on devboard)
13500 +   3 = Both ports enabled
13501 +*/
13502 +static unsigned int ports = 3;
13503 +module_param(ports, uint, S_IRUGO);
13504 +MODULE_PARM_DESC(ports, "Bitmask indicating USB ports to use");
13505 +
13506 +
13507 +/***************************************************************************/
13508 +/***************************************************************************/
13509 +/* Shared global variables for this module                                 */
13510 +/***************************************************************************/
13511 +/***************************************************************************/
13512 +
13513 +/* EP descriptor lists for non period transfers. Must be 32-bit aligned. */
13514 +static volatile struct USB_EP_Desc TxBulkEPList[NBR_OF_EPIDS] __attribute__ ((aligned (4)));
13515 +
13516 +static volatile struct USB_EP_Desc TxCtrlEPList[NBR_OF_EPIDS] __attribute__ ((aligned (4)));
13517 +
13518 +/* EP descriptor lists for period transfers. Must be 32-bit aligned. */
13519 +static volatile struct USB_EP_Desc TxIntrEPList[MAX_INTR_INTERVAL] __attribute__ ((aligned (4)));
13520 +static volatile struct USB_SB_Desc TxIntrSB_zout __attribute__ ((aligned (4)));
13521 +
13522 +static volatile struct USB_EP_Desc TxIsocEPList[NBR_OF_EPIDS] __attribute__ ((aligned (4)));
13523 +static volatile struct USB_SB_Desc TxIsocSB_zout __attribute__ ((aligned (4)));
13524 +
13525 +static volatile struct USB_SB_Desc TxIsocSBList[NBR_OF_EPIDS] __attribute__ ((aligned (4))); 
13526 +
13527 +/* After each enabled bulk EP IN we put two disabled EP descriptors with the eol flag set,
13528 +   causing the DMA to stop the DMA channel. The first of these two has the intr flag set, which
13529 +   gives us a dma8_sub0_descr interrupt. When we receive this, we advance the DMA one step in the
13530 +   EP list and then restart the bulk channel, thus forcing a switch between bulk EP descriptors
13531 +   in each frame. */
13532 +static volatile struct USB_EP_Desc TxBulkDummyEPList[NBR_OF_EPIDS][2] __attribute__ ((aligned (4)));
13533 +
13534 +/* List of URB pointers, where each points to the active URB for a epid.
13535 +   For Bulk, Ctrl and Intr this means which URB that currently is added to
13536 +   DMA lists (Isoc URBs are all directly added to DMA lists). As soon as
13537 +   URB has completed is the queue examined and the first URB in queue is
13538 +   removed and moved to the activeUrbList while its state change to STARTED and
13539 +   its transfer(s) gets added to DMA list (exception Isoc where URBs enter
13540 +   state STARTED directly and added transfers added to DMA lists). */
13541 +static struct urb *activeUrbList[NBR_OF_EPIDS];
13542 +
13543 +/* Additional software state info for each epid */
13544 +static struct etrax_epid epid_state[NBR_OF_EPIDS];
13545 +
13546 +/* Timer handles for bulk traffic timer used to avoid DMA bug where DMA stops
13547 +   even if there is new data waiting to be processed */
13548 +static struct timer_list bulk_start_timer = TIMER_INITIALIZER(NULL, 0, 0);
13549 +static struct timer_list bulk_eot_timer = TIMER_INITIALIZER(NULL, 0, 0);
13550 +
13551 +/* We want the start timer to expire before the eot timer, because the former
13552 +   might start traffic, thus making it unnecessary for the latter to time
13553 +   out. */
13554 +#define BULK_START_TIMER_INTERVAL (HZ/50) /* 20 ms */
13555 +#define BULK_EOT_TIMER_INTERVAL (HZ/16) /* 60 ms */
13556 +
13557 +/* Delay before a URB completion happen when it's scheduled to be delayed */
13558 +#define LATER_TIMER_DELAY (HZ/50) /* 20 ms */
13559 +
13560 +/* Simplifying macros for checking software state info of a epid */
13561 +/* ----------------------------------------------------------------------- */
13562 +#define epid_inuse(epid)       epid_state[epid].inuse
13563 +#define epid_out_traffic(epid) epid_state[epid].out_traffic
13564 +#define epid_isoc(epid)   (epid_state[epid].type == PIPE_ISOCHRONOUS ? 1 : 0)
13565 +#define epid_intr(epid)   (epid_state[epid].type == PIPE_INTERRUPT ? 1 : 0)
13566 +
13567 +
13568 +/***************************************************************************/
13569 +/***************************************************************************/
13570 +/* DEBUG FUNCTIONS                                                         */
13571 +/***************************************************************************/
13572 +/***************************************************************************/
13573 +/* Note that these functions are always available in their "__" variants,
13574 +   for use in error situations. The "__" missing variants are controlled by
13575 +   the USB_DEBUG_DESC/USB_DEBUG_URB macros. */
13576 +static void __dump_urb(struct urb* purb)
13577 +{
13578 +  struct crisv10_urb_priv *urb_priv = purb->hcpriv;
13579 +  int urb_num = -1;
13580 +  if(urb_priv) {
13581 +    urb_num = urb_priv->urb_num;
13582 +  }
13583 +  printk("\nURB:0x%x[%d]\n", (unsigned int)purb, urb_num);
13584 +  printk("dev                   :0x%08lx\n", (unsigned long)purb->dev);
13585 +  printk("pipe                  :0x%08x\n", purb->pipe);
13586 +  printk("status                :%d\n", purb->status);
13587 +  printk("transfer_flags        :0x%08x\n", purb->transfer_flags);
13588 +  printk("transfer_buffer       :0x%08lx\n", (unsigned long)purb->transfer_buffer);
13589 +  printk("transfer_buffer_length:%d\n", purb->transfer_buffer_length);
13590 +  printk("actual_length         :%d\n", purb->actual_length);
13591 +  printk("setup_packet          :0x%08lx\n", (unsigned long)purb->setup_packet);
13592 +  printk("start_frame           :%d\n", purb->start_frame);
13593 +  printk("number_of_packets     :%d\n", purb->number_of_packets);
13594 +  printk("interval              :%d\n", purb->interval);
13595 +  printk("error_count           :%d\n", purb->error_count);
13596 +  printk("context               :0x%08lx\n", (unsigned long)purb->context);
13597 +  printk("complete              :0x%08lx\n\n", (unsigned long)purb->complete);
13598 +}
13599 +
13600 +static void __dump_in_desc(volatile struct USB_IN_Desc *in)
13601 +{
13602 +  printk("\nUSB_IN_Desc at 0x%08lx\n", (unsigned long)in);
13603 +  printk("  sw_len  : 0x%04x (%d)\n", in->sw_len, in->sw_len);
13604 +  printk("  command : 0x%04x\n", in->command);
13605 +  printk("  next    : 0x%08lx\n", in->next);
13606 +  printk("  buf     : 0x%08lx\n", in->buf);
13607 +  printk("  hw_len  : 0x%04x (%d)\n", in->hw_len, in->hw_len);
13608 +  printk("  status  : 0x%04x\n\n", in->status);
13609 +}
13610 +
13611 +static void __dump_sb_desc(volatile struct USB_SB_Desc *sb)
13612 +{
13613 +  char tt = (sb->command & 0x30) >> 4;
13614 +  char *tt_string;
13615 +
13616 +  switch (tt) {
13617 +  case 0:
13618 +    tt_string = "zout";
13619 +    break;
13620 +  case 1:
13621 +    tt_string = "in";
13622 +    break;
13623 +  case 2:
13624 +    tt_string = "out";
13625 +    break;
13626 +  case 3:
13627 +    tt_string = "setup";
13628 +    break;
13629 +  default:
13630 +    tt_string = "unknown (weird)";
13631 +  }
13632 +
13633 +  printk(" USB_SB_Desc at 0x%08lx ", (unsigned long)sb);
13634 +  printk(" command:0x%04x (", sb->command);
13635 +  printk("rem:%d ", (sb->command & 0x3f00) >> 8);
13636 +  printk("full:%d ", (sb->command & 0x40) >> 6);
13637 +  printk("tt:%d(%s) ", tt, tt_string);
13638 +  printk("intr:%d ", (sb->command & 0x8) >> 3);
13639 +  printk("eot:%d ", (sb->command & 0x2) >> 1);
13640 +  printk("eol:%d)", sb->command & 0x1);
13641 +  printk(" sw_len:0x%04x(%d)", sb->sw_len, sb->sw_len);
13642 +  printk(" next:0x%08lx", sb->next);
13643 +  printk(" buf:0x%08lx\n", sb->buf);
13644 +}
13645 +
13646 +
13647 +static void __dump_ep_desc(volatile struct USB_EP_Desc *ep)
13648 +{
13649 +  printk("USB_EP_Desc at 0x%08lx ", (unsigned long)ep);
13650 +  printk(" command:0x%04x (", ep->command);
13651 +  printk("ep_id:%d ", (ep->command & 0x1f00) >> 8);
13652 +  printk("enable:%d ", (ep->command & 0x10) >> 4);
13653 +  printk("intr:%d ", (ep->command & 0x8) >> 3);
13654 +  printk("eof:%d ", (ep->command & 0x2) >> 1);
13655 +  printk("eol:%d)", ep->command & 0x1);
13656 +  printk(" hw_len:0x%04x(%d)", ep->hw_len, ep->hw_len);
13657 +  printk(" next:0x%08lx", ep->next);
13658 +  printk(" sub:0x%08lx\n", ep->sub);
13659 +}
13660 +
13661 +static inline void __dump_ep_list(int pipe_type)
13662 +{
13663 +  volatile struct USB_EP_Desc *ep;
13664 +  volatile struct USB_EP_Desc *first_ep;
13665 +  volatile struct USB_SB_Desc *sb;
13666 +
13667 +  switch (pipe_type)
13668 +    {
13669 +    case PIPE_BULK:
13670 +      first_ep = &TxBulkEPList[0];
13671 +      break;
13672 +    case PIPE_CONTROL:
13673 +      first_ep = &TxCtrlEPList[0];
13674 +      break;
13675 +    case PIPE_INTERRUPT:
13676 +      first_ep = &TxIntrEPList[0];
13677 +      break;
13678 +    case PIPE_ISOCHRONOUS:
13679 +      first_ep = &TxIsocEPList[0];
13680 +      break;
13681 +    default:
13682 +      warn("Cannot dump unknown traffic type");
13683 +      return;
13684 +    }
13685 +  ep = first_ep;
13686 +
13687 +  printk("\n\nDumping EP list...\n\n");
13688 +
13689 +  do {
13690 +    __dump_ep_desc(ep);
13691 +    /* Cannot phys_to_virt on 0 as it turns into 80000000, which is != 0. */
13692 +    sb = ep->sub ? phys_to_virt(ep->sub) : 0;
13693 +    while (sb) {
13694 +      __dump_sb_desc(sb);
13695 +      sb = sb->next ? phys_to_virt(sb->next) : 0;
13696 +    }
13697 +    ep = (volatile struct USB_EP_Desc *)(phys_to_virt(ep->next));
13698 +
13699 +  } while (ep != first_ep);
13700 +}
13701 +
13702 +static inline void __dump_ept_data(int epid)
13703 +{
13704 +  unsigned long flags;
13705 +  __u32 r_usb_ept_data;
13706 +
13707 +  if (epid < 0 || epid > 31) {
13708 +    printk("Cannot dump ept data for invalid epid %d\n", epid);
13709 +    return;
13710 +  }
13711 +
13712 +  local_irq_save(flags);
13713 +  *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
13714 +  nop();
13715 +  r_usb_ept_data = *R_USB_EPT_DATA;
13716 +  local_irq_restore(flags);
13717 +
13718 +  printk(" R_USB_EPT_DATA = 0x%x for epid %d :\n", r_usb_ept_data, epid);
13719 +  if (r_usb_ept_data == 0) {
13720 +    /* No need for more detailed printing. */
13721 +    return;
13722 +  }
13723 +  printk("  valid           : %d\n", (r_usb_ept_data & 0x80000000) >> 31);
13724 +  printk("  hold            : %d\n", (r_usb_ept_data & 0x40000000) >> 30);
13725 +  printk("  error_count_in  : %d\n", (r_usb_ept_data & 0x30000000) >> 28);
13726 +  printk("  t_in            : %d\n", (r_usb_ept_data & 0x08000000) >> 27);
13727 +  printk("  low_speed       : %d\n", (r_usb_ept_data & 0x04000000) >> 26);
13728 +  printk("  port            : %d\n", (r_usb_ept_data & 0x03000000) >> 24);
13729 +  printk("  error_code      : %d\n", (r_usb_ept_data & 0x00c00000) >> 22);
13730 +  printk("  t_out           : %d\n", (r_usb_ept_data & 0x00200000) >> 21);
13731 +  printk("  error_count_out : %d\n", (r_usb_ept_data & 0x00180000) >> 19);
13732 +  printk("  max_len         : %d\n", (r_usb_ept_data & 0x0003f800) >> 11);
13733 +  printk("  ep              : %d\n", (r_usb_ept_data & 0x00000780) >> 7);
13734 +  printk("  dev             : %d\n", (r_usb_ept_data & 0x0000003f));
13735 +}
13736 +
13737 +static inline void __dump_ept_data_iso(int epid)
13738 +{
13739 +  unsigned long flags;
13740 +  __u32 ept_data;
13741 +
13742 +  if (epid < 0 || epid > 31) {
13743 +    printk("Cannot dump ept data for invalid epid %d\n", epid);
13744 +    return;
13745 +  }
13746 +
13747 +  local_irq_save(flags);
13748 +  *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
13749 +  nop();
13750 +  ept_data = *R_USB_EPT_DATA_ISO;
13751 +  local_irq_restore(flags);
13752 +
13753 +  printk(" R_USB_EPT_DATA = 0x%x for epid %d :\n", ept_data, epid);
13754 +  if (ept_data == 0) {
13755 +    /* No need for more detailed printing. */
13756 +    return;
13757 +  }
13758 +  printk("  valid           : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, valid,
13759 +                                               ept_data));
13760 +  printk("  port            : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, port,
13761 +                                               ept_data));
13762 +  printk("  error_code      : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, error_code,
13763 +                                               ept_data));
13764 +  printk("  max_len         : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, max_len,
13765 +                                               ept_data));
13766 +  printk("  ep              : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, ep,
13767 +                                               ept_data));
13768 +  printk("  dev             : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, dev,
13769 +                                               ept_data));
13770 +}
13771 +
13772 +static inline void __dump_ept_data_list(void)
13773 +{
13774 +  int i;
13775 +
13776 +  printk("Dumping the whole R_USB_EPT_DATA list\n");
13777 +
13778 +  for (i = 0; i < 32; i++) {
13779 +    __dump_ept_data(i);
13780 +  }
13781 +}
13782 +
13783 +static void debug_epid(int epid) {
13784 +  int i;
13785 +  
13786 +  if(epid_isoc(epid)) {
13787 +    __dump_ept_data_iso(epid);
13788 +  } else {
13789 +    __dump_ept_data(epid);
13790 +  }
13791 +
13792 +  printk("Bulk:\n");
13793 +  for(i = 0; i < 32; i++) {
13794 +    if(IO_EXTRACT(USB_EP_command, epid, TxBulkEPList[i].command) ==
13795 +       epid) {
13796 +      printk("%d: ", i); __dump_ep_desc(&(TxBulkEPList[i]));
13797 +    }
13798 +  }
13799 +
13800 +  printk("Ctrl:\n");
13801 +  for(i = 0; i < 32; i++) {
13802 +    if(IO_EXTRACT(USB_EP_command, epid, TxCtrlEPList[i].command) ==
13803 +       epid) {
13804 +      printk("%d: ", i); __dump_ep_desc(&(TxCtrlEPList[i]));
13805 +    }
13806 +  }
13807 +
13808 +  printk("Intr:\n");
13809 +  for(i = 0; i < MAX_INTR_INTERVAL; i++) {
13810 +    if(IO_EXTRACT(USB_EP_command, epid, TxIntrEPList[i].command) ==
13811 +       epid) {
13812 +      printk("%d: ", i); __dump_ep_desc(&(TxIntrEPList[i]));
13813 +    }
13814 +  }
13815 +  
13816 +  printk("Isoc:\n");
13817 +  for(i = 0; i < 32; i++) {
13818 +    if(IO_EXTRACT(USB_EP_command, epid, TxIsocEPList[i].command) ==
13819 +       epid) {
13820 +      printk("%d: ", i); __dump_ep_desc(&(TxIsocEPList[i]));
13821 +    }
13822 +  }
13823 +
13824 +  __dump_ept_data_list();
13825 +  __dump_ep_list(PIPE_INTERRUPT);
13826 +  printk("\n\n");
13827 +}
13828 +
13829 +
13830 +
13831 +char* hcd_status_to_str(__u8 bUsbStatus) {
13832 +  static char hcd_status_str[128];
13833 +  hcd_status_str[0] = '\0';
13834 +  if(bUsbStatus & IO_STATE(R_USB_STATUS, ourun, yes)) {
13835 +    strcat(hcd_status_str, "ourun ");
13836 +  }
13837 +  if(bUsbStatus & IO_STATE(R_USB_STATUS, perror, yes)) {
13838 +    strcat(hcd_status_str, "perror ");
13839 +  }
13840 +  if(bUsbStatus & IO_STATE(R_USB_STATUS, device_mode, yes)) {
13841 +    strcat(hcd_status_str, "device_mode ");
13842 +  }
13843 +  if(bUsbStatus & IO_STATE(R_USB_STATUS, host_mode, yes)) {
13844 +    strcat(hcd_status_str, "host_mode ");
13845 +  }
13846 +  if(bUsbStatus & IO_STATE(R_USB_STATUS, started, yes)) {
13847 +    strcat(hcd_status_str, "started ");
13848 +  }
13849 +  if(bUsbStatus & IO_STATE(R_USB_STATUS, running, yes)) {
13850 +    strcat(hcd_status_str, "running ");
13851 +  }
13852 +  return hcd_status_str;
13853 +}
13854 +
13855 +
13856 +char* sblist_to_str(struct USB_SB_Desc* sb_desc) {
13857 +  static char sblist_to_str_buff[128];
13858 +  char tmp[32], tmp2[32];
13859 +  sblist_to_str_buff[0] = '\0';
13860 +  while(sb_desc != NULL) {
13861 +    switch(IO_EXTRACT(USB_SB_command, tt, sb_desc->command)) {
13862 +    case 0: sprintf(tmp, "zout");  break;
13863 +    case 1: sprintf(tmp, "in");    break;
13864 +    case 2: sprintf(tmp, "out");   break;
13865 +    case 3: sprintf(tmp, "setup"); break;
13866 +    }
13867 +    sprintf(tmp2, "(%s %d)", tmp, sb_desc->sw_len);
13868 +    strcat(sblist_to_str_buff, tmp2);
13869 +    if(sb_desc->next != 0) {
13870 +      sb_desc = phys_to_virt(sb_desc->next);
13871 +    } else {
13872 +      sb_desc = NULL;
13873 +    }
13874 +  }
13875 +  return sblist_to_str_buff;
13876 +}
13877 +
13878 +char* port_status_to_str(__u16 wPortStatus) {
13879 +  static char port_status_str[128];
13880 +  port_status_str[0] = '\0';
13881 +  if(wPortStatus & IO_STATE(R_USB_RH_PORT_STATUS_1, connected, yes)) {
13882 +    strcat(port_status_str, "connected ");
13883 +  }
13884 +  if(wPortStatus & IO_STATE(R_USB_RH_PORT_STATUS_1, enabled, yes)) {
13885 +    strcat(port_status_str, "enabled ");
13886 +  }
13887 +  if(wPortStatus & IO_STATE(R_USB_RH_PORT_STATUS_1, suspended, yes)) {
13888 +    strcat(port_status_str, "suspended ");
13889 +  }
13890 +  if(wPortStatus & IO_STATE(R_USB_RH_PORT_STATUS_1, reset, yes)) {
13891 +    strcat(port_status_str, "reset ");
13892 +  }
13893 +  if(wPortStatus & IO_STATE(R_USB_RH_PORT_STATUS_1, speed, full)) {
13894 +    strcat(port_status_str, "full-speed ");
13895 +  } else {
13896 +    strcat(port_status_str, "low-speed ");
13897 +  }
13898 +  return port_status_str;
13899 +}
13900 +
13901 +
13902 +char* endpoint_to_str(struct usb_endpoint_descriptor *ed) {
13903 +  static char endpoint_to_str_buff[128];
13904 +  char tmp[32];
13905 +  int epnum = ed->bEndpointAddress & 0x0F;
13906 +  int dir = ed->bEndpointAddress & 0x80;
13907 +  int type = ed->bmAttributes & 0x03;
13908 +  endpoint_to_str_buff[0] = '\0';
13909 +  sprintf(endpoint_to_str_buff, "ep:%d ", epnum);
13910 +  switch(type) {
13911 +  case 0:
13912 +    sprintf(tmp, " ctrl");
13913 +    break;
13914 +  case 1:
13915 +    sprintf(tmp, " isoc");
13916 +    break;
13917 +  case 2:
13918 +    sprintf(tmp, " bulk");
13919 +    break;
13920 +  case 3:
13921 +    sprintf(tmp, " intr");
13922 +    break;
13923 +  }
13924 +  strcat(endpoint_to_str_buff, tmp);
13925 +  if(dir) {
13926 +    sprintf(tmp, " in");
13927 +  } else {
13928 +    sprintf(tmp, " out");
13929 +  }
13930 +  strcat(endpoint_to_str_buff, tmp);
13931 +
13932 +  return endpoint_to_str_buff;
13933 +}
13934 +
13935 +/* Debug helper functions for Transfer Controller */
13936 +char* pipe_to_str(unsigned int pipe) {
13937 +  static char pipe_to_str_buff[128];
13938 +  char tmp[64];
13939 +  sprintf(pipe_to_str_buff, "dir:%s", str_dir(pipe));
13940 +  sprintf(tmp, " type:%s", str_type(pipe));
13941 +  strcat(pipe_to_str_buff, tmp);
13942 +
13943 +  sprintf(tmp, " dev:%d", usb_pipedevice(pipe));
13944 +  strcat(pipe_to_str_buff, tmp);
13945 +  sprintf(tmp, " ep:%d", usb_pipeendpoint(pipe));
13946 +  strcat(pipe_to_str_buff, tmp);
13947 +  return pipe_to_str_buff;
13948 +}
13949 +
13950 +
13951 +#define USB_DEBUG_DESC 1
13952 +
13953 +#ifdef USB_DEBUG_DESC
13954 +#define dump_in_desc(x) __dump_in_desc(x)
13955 +#define dump_sb_desc(...) __dump_sb_desc(__VA_ARGS__)
13956 +#define dump_ep_desc(x) __dump_ep_desc(x)
13957 +#define dump_ept_data(x) __dump_ept_data(x)
13958 +#else
13959 +#define dump_in_desc(...) do {} while (0)
13960 +#define dump_sb_desc(...) do {} while (0)
13961 +#define dump_ep_desc(...) do {} while (0)
13962 +#endif
13963 +
13964 +
13965 +/* Uncomment this to enable massive function call trace
13966 +   #define USB_DEBUG_TRACE */
13967 +
13968 +#ifdef USB_DEBUG_TRACE
13969 +#define DBFENTER (printk(": Entering: %s\n", __FUNCTION__))
13970 +#define DBFEXIT  (printk(": Exiting:  %s\n", __FUNCTION__))
13971 +#else
13972 +#define DBFENTER do {} while (0)
13973 +#define DBFEXIT  do {} while (0)
13974 +#endif
13975 +
13976 +#define CHECK_ALIGN(x) if (((__u32)(x)) & 0x00000003) \
13977 +{panic("Alignment check (DWORD) failed at %s:%s:%d\n", __FILE__, __FUNCTION__, __LINE__);}
13978 +
13979 +/* Most helpful debugging aid */
13980 +#define ASSERT(expr) ((void) ((expr) ? 0 : (err("assert failed at: %s %d",__FUNCTION__, __LINE__))))
13981 +
13982 +
13983 +/***************************************************************************/
13984 +/***************************************************************************/
13985 +/* Forward declarations                                                    */
13986 +/***************************************************************************/
13987 +/***************************************************************************/
13988 +void crisv10_hcd_epid_attn_irq(struct crisv10_irq_reg *reg);
13989 +void crisv10_hcd_port_status_irq(struct crisv10_irq_reg *reg);
13990 +void crisv10_hcd_ctl_status_irq(struct crisv10_irq_reg *reg);
13991 +void crisv10_hcd_isoc_eof_irq(struct crisv10_irq_reg *reg);
13992 +
13993 +void rh_port_status_change(__u16[]);
13994 +int  rh_clear_port_feature(__u8, __u16);
13995 +int  rh_set_port_feature(__u8, __u16);
13996 +static void rh_disable_port(unsigned int port);
13997 +
13998 +static void check_finished_bulk_tx_epids(struct usb_hcd *hcd,
13999 +                                        int timer);
14000 +
14001 +static int  tc_setup_epid(struct usb_host_endpoint *ep, struct urb *urb,
14002 +                        int mem_flags);
14003 +static void tc_free_epid(struct usb_host_endpoint *ep);
14004 +static int  tc_allocate_epid(void);
14005 +static void tc_finish_urb(struct usb_hcd *hcd, struct urb *urb, int status);
14006 +static void tc_finish_urb_later(struct usb_hcd *hcd, struct urb *urb,
14007 +                               int status);
14008 +
14009 +static int  urb_priv_create(struct usb_hcd *hcd, struct urb *urb, int epid,
14010 +                          int mem_flags);
14011 +static void urb_priv_free(struct usb_hcd *hcd, struct urb *urb);
14012 +
14013 +static inline struct urb *urb_list_first(int epid);
14014 +static inline void        urb_list_add(struct urb *urb, int epid,
14015 +                                     int mem_flags);
14016 +static inline urb_entry_t *urb_list_entry(struct urb *urb, int epid);
14017 +static inline void        urb_list_del(struct urb *urb, int epid);
14018 +static inline void        urb_list_move_last(struct urb *urb, int epid);
14019 +static inline struct urb *urb_list_next(struct urb *urb, int epid);
14020 +
14021 +int create_sb_for_urb(struct urb *urb, int mem_flags);
14022 +int init_intr_urb(struct urb *urb, int mem_flags);
14023 +
14024 +static inline void  etrax_epid_set(__u8 index, __u32 data);
14025 +static inline void  etrax_epid_clear_error(__u8 index);
14026 +static inline void  etrax_epid_set_toggle(__u8 index, __u8 dirout,
14027 +                                             __u8 toggle);
14028 +static inline __u8  etrax_epid_get_toggle(__u8 index, __u8 dirout);
14029 +static inline __u32 etrax_epid_get(__u8 index);
14030 +
14031 +/* We're accessing the same register position in Etrax so
14032 +   when we do full access the internal difference doesn't matter */
14033 +#define etrax_epid_iso_set(index, data) etrax_epid_set(index, data)
14034 +#define etrax_epid_iso_get(index) etrax_epid_get(index)
14035 +
14036 +
14037 +static void        tc_dma_process_isoc_urb(struct urb *urb);
14038 +static void        tc_dma_process_queue(int epid);
14039 +static void        tc_dma_unlink_intr_urb(struct urb *urb);
14040 +static irqreturn_t tc_dma_tx_interrupt(int irq, void *vhc);
14041 +static irqreturn_t tc_dma_rx_interrupt(int irq, void *vhc);
14042 +
14043 +static void tc_bulk_start_timer_func(unsigned long dummy);
14044 +static void tc_bulk_eot_timer_func(unsigned long dummy);
14045 +
14046 +
14047 +/*************************************************************/
14048 +/*************************************************************/
14049 +/* Host Controller Driver block                              */
14050 +/*************************************************************/
14051 +/*************************************************************/
14052 +
14053 +/* HCD operations */
14054 +static irqreturn_t crisv10_hcd_top_irq(int irq, void*);
14055 +static int crisv10_hcd_reset(struct usb_hcd *);
14056 +static int crisv10_hcd_start(struct usb_hcd *);
14057 +static void crisv10_hcd_stop(struct usb_hcd *);
14058 +#ifdef CONFIG_PM
14059 +static int crisv10_hcd_suspend(struct device *, u32, u32);
14060 +static int crisv10_hcd_resume(struct device *, u32);
14061 +#endif /* CONFIG_PM */
14062 +static int crisv10_hcd_get_frame(struct usb_hcd *);
14063 +
14064 +static int  tc_urb_enqueue(struct usb_hcd *, struct usb_host_endpoint *ep, struct urb *, gfp_t mem_flags);
14065 +static int  tc_urb_dequeue(struct usb_hcd *, struct urb *);
14066 +static void tc_endpoint_disable(struct usb_hcd *, struct usb_host_endpoint *ep);
14067 +
14068 +static int rh_status_data_request(struct usb_hcd *, char *);
14069 +static int rh_control_request(struct usb_hcd *, u16, u16, u16, char*, u16);
14070 +
14071 +#ifdef CONFIG_PM
14072 +static int crisv10_hcd_hub_suspend(struct usb_hcd *);
14073 +static int crisv10_hcd_hub_resume(struct usb_hcd *);
14074 +#endif /* CONFIG_PM */
14075 +#ifdef CONFIG_USB_OTG
14076 +static int crisv10_hcd_start_port_reset(struct usb_hcd *, unsigned);
14077 +#endif /* CONFIG_USB_OTG */
14078 +
14079 +/* host controller driver interface */
14080 +static const struct hc_driver crisv10_hc_driver = 
14081 +  {
14082 +    .description =     hc_name,
14083 +    .product_desc =    product_desc,
14084 +    .hcd_priv_size =   sizeof(struct crisv10_hcd),
14085 +
14086 +    /* Attaching IRQ handler manually in probe() */
14087 +    /* .irq =          crisv10_hcd_irq, */
14088 +
14089 +    .flags =           HCD_USB11,
14090 +
14091 +    /* called to init HCD and root hub */
14092 +    .reset =           crisv10_hcd_reset,
14093 +    .start =           crisv10_hcd_start,      
14094 +
14095 +    /* cleanly make HCD stop writing memory and doing I/O */
14096 +    .stop =            crisv10_hcd_stop,
14097 +
14098 +    /* return current frame number */
14099 +    .get_frame_number =        crisv10_hcd_get_frame,
14100 +
14101 +
14102 +    /* Manage i/o requests via the Transfer Controller */
14103 +    .urb_enqueue =     tc_urb_enqueue,
14104 +    .urb_dequeue =     tc_urb_dequeue,
14105 +
14106 +    /* hw synch, freeing endpoint resources that urb_dequeue can't */
14107 +    .endpoint_disable = tc_endpoint_disable,
14108 +
14109 +
14110 +    /* Root Hub support */
14111 +    .hub_status_data = rh_status_data_request,
14112 +    .hub_control =     rh_control_request,
14113 +#ifdef CONFIG_PM
14114 +    .hub_suspend =     rh_suspend_request,
14115 +    .hub_resume =      rh_resume_request,
14116 +#endif /* CONFIG_PM */
14117 +#ifdef CONFIG_USB_OTG
14118 +    .start_port_reset =        crisv10_hcd_start_port_reset,
14119 +#endif /* CONFIG_USB_OTG */
14120 +  };
14121 +
14122 +
14123 +/*
14124 + * conversion between pointers to a hcd and the corresponding
14125 + * crisv10_hcd 
14126 + */
14127 +
14128 +static inline struct crisv10_hcd *hcd_to_crisv10_hcd(struct usb_hcd *hcd)
14129 +{
14130 +       return (struct crisv10_hcd *) hcd->hcd_priv;
14131 +}
14132 +
14133 +static inline struct usb_hcd *crisv10_hcd_to_hcd(struct crisv10_hcd *hcd)
14134 +{
14135 +       return container_of((void *) hcd, struct usb_hcd, hcd_priv);
14136 +}
14137 +
14138 +/* check if specified port is in use */
14139 +static inline int port_in_use(unsigned int port)
14140 +{
14141 +       return ports & (1 << port);
14142 +}
14143 +
14144 +/* number of ports in use */
14145 +static inline unsigned int num_ports(void)
14146 +{
14147 +       unsigned int i, num = 0;
14148 +       for (i = 0; i < USB_ROOT_HUB_PORTS; i++)
14149 +               if (port_in_use(i))
14150 +                       num++;
14151 +       return num;
14152 +}
14153 +
14154 +/* map hub port number to the port number used internally by the HC */
14155 +static inline unsigned int map_port(unsigned int port)
14156 +{
14157 +  unsigned int i, num = 0;
14158 +  for (i = 0; i < USB_ROOT_HUB_PORTS; i++)
14159 +    if (port_in_use(i))
14160 +      if (++num == port)
14161 +       return i;
14162 +  return -1;
14163 +}
14164 +
14165 +/* size of descriptors in slab cache */
14166 +#ifndef MAX
14167 +#define MAX(x, y)              ((x) > (y) ? (x) : (y))
14168 +#endif
14169 +
14170 +
14171 +/******************************************************************/
14172 +/* Hardware Interrupt functions                                   */
14173 +/******************************************************************/
14174 +
14175 +/* Fast interrupt handler for HC */
14176 +static irqreturn_t crisv10_hcd_top_irq(int irq, void *vcd)
14177 +{
14178 +  struct usb_hcd *hcd = vcd;
14179 +  struct crisv10_irq_reg reg;
14180 +  __u32 irq_mask;
14181 +  unsigned long flags;
14182 +
14183 +  DBFENTER;
14184 +
14185 +  ASSERT(hcd != NULL);
14186 +  reg.hcd = hcd;
14187 +
14188 +  /* Turn off other interrupts while handling these sensitive cases */
14189 +  local_irq_save(flags);
14190 +  
14191 +  /* Read out which interrupts are flagged */
14192 +  irq_mask = *R_USB_IRQ_MASK_READ;
14193 +  reg.r_usb_irq_mask_read = irq_mask;
14194 +
14195 +  /* Reading R_USB_STATUS clears the ctl_status interrupt. Note that
14196 +     R_USB_STATUS must be read before R_USB_EPID_ATTN since reading the latter
14197 +     clears the ourun and perror fields of R_USB_STATUS. */
14198 +  reg.r_usb_status = *R_USB_STATUS;
14199 +  
14200 +  /* Reading R_USB_EPID_ATTN clears the iso_eof, bulk_eot and epid_attn
14201 +     interrupts. */
14202 +  reg.r_usb_epid_attn = *R_USB_EPID_ATTN;
14203 +  
14204 +  /* Reading R_USB_RH_PORT_STATUS_1 and R_USB_RH_PORT_STATUS_2 clears the
14205 +     port_status interrupt. */
14206 +  reg.r_usb_rh_port_status_1 = *R_USB_RH_PORT_STATUS_1;
14207 +  reg.r_usb_rh_port_status_2 = *R_USB_RH_PORT_STATUS_2;
14208 +  
14209 +  /* Reading R_USB_FM_NUMBER clears the sof interrupt. */
14210 +  /* Note: the lower 11 bits contain the actual frame number, sent with each
14211 +     sof. */
14212 +  reg.r_usb_fm_number = *R_USB_FM_NUMBER;
14213 +
14214 +  /* Interrupts are handled in order of priority. */
14215 +  if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, port_status)) {
14216 +    crisv10_hcd_port_status_irq(&reg);
14217 +  }
14218 +  if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, epid_attn)) {
14219 +    crisv10_hcd_epid_attn_irq(&reg);
14220 +  }
14221 +  if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, ctl_status)) {
14222 +    crisv10_hcd_ctl_status_irq(&reg);
14223 +  }
14224 +  if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, iso_eof)) {
14225 +    crisv10_hcd_isoc_eof_irq(&reg);
14226 +  }
14227 +  if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, bulk_eot)) {
14228 +    /* Update/restart the bulk start timer since obviously the channel is
14229 +       running. */
14230 +    mod_timer(&bulk_start_timer, jiffies + BULK_START_TIMER_INTERVAL);
14231 +    /* Update/restart the bulk eot timer since we just received a bulk eot
14232 +       interrupt. */
14233 +    mod_timer(&bulk_eot_timer, jiffies + BULK_EOT_TIMER_INTERVAL);
14234 +
14235 +    /* Check for finished bulk transfers on epids */
14236 +    check_finished_bulk_tx_epids(hcd, 0);
14237 +  }
14238 +  local_irq_restore(flags);
14239 +
14240 +  DBFEXIT;
14241 +  return IRQ_HANDLED;
14242 +}
14243 +
14244 +
14245 +void crisv10_hcd_epid_attn_irq(struct crisv10_irq_reg *reg) {
14246 +  struct usb_hcd *hcd = reg->hcd;
14247 +  struct crisv10_urb_priv *urb_priv;
14248 +  int epid;
14249 +  DBFENTER;
14250 +
14251 +  for (epid = 0; epid < NBR_OF_EPIDS; epid++) {
14252 +    if (test_bit(epid, (void *)&reg->r_usb_epid_attn)) {
14253 +      struct urb *urb;
14254 +      __u32 ept_data;
14255 +      int error_code;
14256 +
14257 +      if (epid == DUMMY_EPID || epid == INVALID_EPID) {
14258 +       /* We definitely don't care about these ones. Besides, they are
14259 +          always disabled, so any possible disabling caused by the
14260 +          epid attention interrupt is irrelevant. */
14261 +       warn("Got epid_attn for INVALID_EPID or DUMMY_EPID (%d).", epid);
14262 +       continue;
14263 +      }
14264 +
14265 +      if(!epid_inuse(epid)) {
14266 +       irq_err("Epid attention on epid:%d that isn't in use\n", epid);
14267 +       printk("R_USB_STATUS: 0x%x\n", reg->r_usb_status);
14268 +       debug_epid(epid);
14269 +       continue;
14270 +      }
14271 +
14272 +      /* Note that although there are separate R_USB_EPT_DATA and
14273 +        R_USB_EPT_DATA_ISO registers, they are located at the same address and
14274 +        are of the same size. In other words, this read should be ok for isoc
14275 +        also. */
14276 +      ept_data = etrax_epid_get(epid);
14277 +      error_code = IO_EXTRACT(R_USB_EPT_DATA, error_code, ept_data);
14278 +
14279 +      /* Get the active URB for this epid. We blatantly assume
14280 +        that only this URB could have caused the epid attention. */
14281 +      urb = activeUrbList[epid];
14282 +      if (urb == NULL) {
14283 +       irq_err("Attention on epid:%d error:%d with no active URB.\n",
14284 +               epid, error_code);
14285 +       printk("R_USB_STATUS: 0x%x\n", reg->r_usb_status);
14286 +       debug_epid(epid);
14287 +       continue;
14288 +      }
14289 +
14290 +      urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
14291 +      ASSERT(urb_priv);
14292 +
14293 +      /* Using IO_STATE_VALUE on R_USB_EPT_DATA should be ok for isoc also. */
14294 +      if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA, error_code, no_error)) {
14295 +
14296 +       /* Isoc traffic doesn't have error_count_in/error_count_out. */
14297 +       if ((usb_pipetype(urb->pipe) != PIPE_ISOCHRONOUS) &&
14298 +           (IO_EXTRACT(R_USB_EPT_DATA, error_count_in, ept_data) == 3 ||
14299 +            IO_EXTRACT(R_USB_EPT_DATA, error_count_out, ept_data) == 3)) {
14300 +         /* Check if URB is already marked for late-finish; we can get
14301 +            several 3rd errors for Intr traffic when a device is unplugged */
14302 +         if(urb_priv->later_data == NULL) {
14303 +           /* 3rd error. */
14304 +           irq_warn("3rd error for epid:%d (%s %s) URB:0x%x[%d]\n", epid,
14305 +                    str_dir(urb->pipe), str_type(urb->pipe),
14306 +                    (unsigned int)urb, urb_priv->urb_num);
14307 +         
14308 +           tc_finish_urb_later(hcd, urb, -EPROTO);
14309 +         }
14310 +
14311 +       } else if (reg->r_usb_status & IO_MASK(R_USB_STATUS, perror)) {
14312 +         irq_warn("Perror for epid:%d\n", epid);
14313 +         printk("FM_NUMBER: %d\n", reg->r_usb_fm_number & 0x7ff);
14314 +         printk("R_USB_STATUS: 0x%x\n", reg->r_usb_status);
14315 +         __dump_urb(urb);
14316 +         debug_epid(epid);
14317 +
14318 +         if (!(ept_data & IO_MASK(R_USB_EPT_DATA, valid))) {
14319 +           /* invalid ep_id */
14320 +           panic("Perror because of invalid epid."
14321 +                 " Deconfigured too early?");
14322 +         } else {
14323 +           /* past eof1, near eof, zout transfer, setup transfer */
14324 +           /* Dump the urb and the relevant EP descriptor. */
14325 +           panic("Something wrong with DMA descriptor contents."
14326 +                 " Too much traffic inserted?");
14327 +         }
14328 +       } else if (reg->r_usb_status & IO_MASK(R_USB_STATUS, ourun)) {
14329 +         /* buffer ourun */
14330 +         printk("FM_NUMBER: %d\n", reg->r_usb_fm_number & 0x7ff);
14331 +         printk("R_USB_STATUS: 0x%x\n", reg->r_usb_status);
14332 +         __dump_urb(urb);
14333 +         debug_epid(epid);
14334 +
14335 +         panic("Buffer overrun/underrun for epid:%d. DMA too busy?", epid);
14336 +       } else {
14337 +         irq_warn("Attention on epid:%d (%s %s) with no error code\n", epid,
14338 +                  str_dir(urb->pipe), str_type(urb->pipe));
14339 +         printk("R_USB_STATUS: 0x%x\n", reg->r_usb_status);
14340 +         __dump_urb(urb);
14341 +         debug_epid(epid);
14342 +       }
14343 +
14344 +      } else if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA, error_code,
14345 +                                             stall)) {
14346 +       /* Not really a protocol error, just says that the endpoint gave
14347 +          a stall response. Note that error_code cannot be stall for isoc. */
14348 +       if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
14349 +         panic("Isoc traffic cannot stall");
14350 +       }
14351 +
14352 +       tc_dbg("Stall for epid:%d (%s %s) URB:0x%x\n", epid,
14353 +              str_dir(urb->pipe), str_type(urb->pipe), (unsigned int)urb);
14354 +       tc_finish_urb(hcd, urb, -EPIPE);
14355 +
14356 +      } else if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA, error_code,
14357 +                                             bus_error)) {
14358 +       /* Two devices responded to a transaction request. Must be resolved
14359 +          by software. FIXME: Reset ports? */
14360 +       panic("Bus error for epid %d."
14361 +             " Two devices responded to transaction request\n",
14362 +             epid);
14363 +
14364 +      } else if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA, error_code,
14365 +                                             buffer_error)) {
14366 +       /* DMA overrun or underrun. */
14367 +       irq_warn("Buffer overrun/underrun for epid:%d (%s %s)\n", epid,
14368 +                str_dir(urb->pipe), str_type(urb->pipe));
14369 +
14370 +       /* It seems that error_code = buffer_error in
14371 +          R_USB_EPT_DATA/R_USB_EPT_DATA_ISO and ourun = yes in R_USB_STATUS
14372 +          are the same error. */
14373 +       tc_finish_urb(hcd, urb, -EPROTO);
14374 +      } else {
14375 +         irq_warn("Unknown attention on epid:%d (%s %s)\n", epid,
14376 +                  str_dir(urb->pipe), str_type(urb->pipe));
14377 +         dump_ept_data(epid);
14378 +      }
14379 +    }
14380 +  }
14381 +  DBFEXIT;
14382 +}
14383 +
14384 +void crisv10_hcd_port_status_irq(struct crisv10_irq_reg *reg)
14385 +{
14386 +  __u16 port_reg[USB_ROOT_HUB_PORTS];
14387 +  DBFENTER;
14388 +  port_reg[0] = reg->r_usb_rh_port_status_1;
14389 +  port_reg[1] = reg->r_usb_rh_port_status_2;
14390 +  rh_port_status_change(port_reg);
14391 +  DBFEXIT;
14392 +}
14393 +
14394 +void crisv10_hcd_isoc_eof_irq(struct crisv10_irq_reg *reg)
14395 +{
14396 +  int epid;
14397 +  struct urb *urb;
14398 +  struct crisv10_urb_priv *urb_priv;
14399 +
14400 +  DBFENTER;
14401 +
14402 +  for (epid = 0; epid < NBR_OF_EPIDS - 1; epid++) {
14403 +
14404 +    /* Only check epids that are in use, are valid and have an SB list */
14405 +    if (!epid_inuse(epid) || epid == INVALID_EPID ||
14406 +       TxIsocEPList[epid].sub == 0 || epid == DUMMY_EPID) {
14407 +      /* Nothing here to see. */
14408 +      continue;
14409 +    }
14410 +    ASSERT(epid_isoc(epid));
14411 +
14412 +    /* Get the active URB for this epid (if any). */
14413 +    urb = activeUrbList[epid];
14414 +    if (urb == 0) {
14415 +      isoc_warn("Ignoring NULL urb for epid:%d\n", epid);
14416 +      continue;
14417 +    }
14418 +    if(!epid_out_traffic(epid)) {
14419 +      /* Sanity check. */
14420 +      ASSERT(usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS);
14421 +
14422 +      urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
14423 +      ASSERT(urb_priv);
14424 +
14425 +      if (urb_priv->urb_state == NOT_STARTED) {
14426 +       /* If ASAP is not set and urb->start_frame is the current frame,
14427 +          start the transfer. */
14428 +       if (!(urb->transfer_flags & URB_ISO_ASAP) &&
14429 +           (urb->start_frame == (*R_USB_FM_NUMBER & 0x7ff))) {
14430 +         /* EP should not be enabled if we're waiting for start_frame */
14431 +         ASSERT((TxIsocEPList[epid].command &
14432 +                 IO_STATE(USB_EP_command, enable, yes)) == 0);
14433 +
14434 +         isoc_warn("Enabling isoc IN EP descr for epid %d\n", epid);
14435 +         TxIsocEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes);
14436 +
14437 +         /* This urb is now active. */
14438 +         urb_priv->urb_state = STARTED;
14439 +         continue;
14440 +       }
14441 +      }
14442 +    }
14443 +  }
14444 +
14445 +  DBFEXIT;
14446 +}
14447 +
14448 +void crisv10_hcd_ctl_status_irq(struct crisv10_irq_reg *reg)
14449 +{
14450 +  struct crisv10_hcd* crisv10_hcd = hcd_to_crisv10_hcd(reg->hcd);
14451 +
14452 +  DBFENTER;
14453 +  ASSERT(crisv10_hcd);
14454 +
14455 +  irq_dbg("ctr_status_irq, controller status: %s\n",
14456 +         hcd_status_to_str(reg->r_usb_status));
14457 +  
14458 +  /* FIXME: What should we do if we get ourun or perror? Dump the EP and SB
14459 +     list for the corresponding epid? */
14460 +  if (reg->r_usb_status & IO_MASK(R_USB_STATUS, ourun)) {
14461 +    panic("USB controller got ourun.");
14462 +  }
14463 +  if (reg->r_usb_status & IO_MASK(R_USB_STATUS, perror)) {
14464 +    
14465 +    /* Before, etrax_usb_do_intr_recover was called on this epid if it was
14466 +       an interrupt pipe. I don't see how re-enabling all EP descriptors
14467 +       will help if there was a programming error. */
14468 +    panic("USB controller got perror.");
14469 +  }
14470 +
14471 +  /* Keep track of USB Controller, if it's running or not */
14472 +  if(reg->r_usb_status & IO_STATE(R_USB_STATUS, running, yes)) {
14473 +    crisv10_hcd->running = 1;
14474 +  } else {
14475 +    crisv10_hcd->running = 0;
14476 +  }
14477 +  
14478 +  if (reg->r_usb_status & IO_MASK(R_USB_STATUS, device_mode)) {
14479 +    /* We should never operate in device mode. */
14480 +    panic("USB controller in device mode.");
14481 +  }
14482 +
14483 +  /* Set the flag to avoid getting "Unlink after no-IRQ? Controller is probably
14484 +     using the wrong IRQ" from hcd_unlink_urb() in drivers/usb/core/hcd.c */
14485 +  set_bit(HCD_FLAG_SAW_IRQ, &reg->hcd->flags);
14486 +  
14487 +  DBFEXIT;
14488 +}
14489 +
14490 +
14491 +/******************************************************************/
14492 +/* Host Controller interface functions                            */
14493 +/******************************************************************/
14494 +
14495 +static inline void crisv10_ready_wait(void) {
14496 +  volatile int timeout = 10000;
14497 +  /* Check the busy bit of USB controller in Etrax */
14498 +  while((*R_USB_COMMAND & IO_MASK(R_USB_COMMAND, busy)) &&
14499 +       (timeout-- > 0));
14500 +  if(timeout == 0) {
14501 +    warn("Timeout while waiting for USB controller to be idle\n");
14502 +  }
14503 +}
14504 +
14505 +/* reset host controller */
14506 +static int crisv10_hcd_reset(struct usb_hcd *hcd)
14507 +{
14508 +  DBFENTER;
14509 +  hcd_dbg(hcd, "reset\n");
14510 +
14511 +
14512 +  /* Reset the USB interface. */
14513 +  /*
14514 +  *R_USB_COMMAND =
14515 +    IO_STATE(R_USB_COMMAND, port_sel, nop) |
14516 +    IO_STATE(R_USB_COMMAND, port_cmd, reset) |
14517 +    IO_STATE(R_USB_COMMAND, ctrl_cmd, reset);
14518 +  nop();
14519 +  */
14520 +  DBFEXIT;
14521 +  return 0;
14522 +}
14523 +
14524 +/* start host controller */
14525 +static int crisv10_hcd_start(struct usb_hcd *hcd)
14526 +{
14527 +  DBFENTER;
14528 +  hcd_dbg(hcd, "start\n");
14529 +
14530 +  crisv10_ready_wait();
14531 +
14532 +  /* Start processing of USB traffic. */
14533 +  *R_USB_COMMAND =
14534 +    IO_STATE(R_USB_COMMAND, port_sel, nop) |
14535 +    IO_STATE(R_USB_COMMAND, port_cmd, reset) |
14536 +    IO_STATE(R_USB_COMMAND, ctrl_cmd, host_run);
14537 +
14538 +  nop();
14539 +
14540 +  hcd->state = HC_STATE_RUNNING;
14541 +
14542 +  DBFEXIT;
14543 +  return 0;
14544 +}
14545 +
14546 +/* stop host controller */
14547 +static void crisv10_hcd_stop(struct usb_hcd *hcd)
14548 +{
14549 +  DBFENTER;
14550 +  hcd_dbg(hcd, "stop\n");
14551 +  crisv10_hcd_reset(hcd);
14552 +  DBFEXIT;
14553 +}
14554 +
14555 +/* return the current frame number */
14556 +static int crisv10_hcd_get_frame(struct usb_hcd *hcd)
14557 +{
14558 +  DBFENTER;
14559 +  DBFEXIT;
14560 +  return (*R_USB_FM_NUMBER & 0x7ff);
14561 +}
14562 +
14563 +#ifdef CONFIG_USB_OTG
14564 +
14565 +static int crisv10_hcd_start_port_reset(struct usb_hcd *hcd, unsigned port)
14566 +{
14567 +  return 0; /* no-op for now */
14568 +}
14569 +
14570 +#endif /* CONFIG_USB_OTG */
14571 +
14572 +
14573 +/******************************************************************/
14574 +/* Root Hub functions                                             */
14575 +/******************************************************************/
14576 +
14577 +/* root hub status */
14578 +static const struct usb_hub_status rh_hub_status = 
14579 +  {
14580 +    .wHubStatus =              0,
14581 +    .wHubChange =              0,
14582 +  };
14583 +
14584 +/* root hub descriptor */
14585 +static const u8 rh_hub_descr[] =
14586 +  {
14587 +    0x09,                      /* bDescLength         */
14588 +    0x29,                      /* bDescriptorType     */
14589 +    USB_ROOT_HUB_PORTS,         /* bNbrPorts          */
14590 +    0x00,                      /* wHubCharacteristics */
14591 +    0x00,               
14592 +    0x01,                      /* bPwrOn2pwrGood      */
14593 +    0x00,                      /* bHubContrCurrent    */
14594 +    0x00,                      /* DeviceRemovable     */
14595 +    0xff                       /* PortPwrCtrlMask     */
14596 +  };
14597 +
14598 +/* Actual holder of root hub status*/
14599 +struct crisv10_rh rh;
14600 +
14601 +/* Initialize root hub data structures (called from dvdrv_hcd_probe()) */
14602 +int rh_init(void) {
14603 +  int i;
14604 +  /* Reset port status flags */
14605 +  for (i = 0; i < USB_ROOT_HUB_PORTS; i++) {
14606 +    rh.wPortChange[i] = 0;
14607 +    rh.wPortStatusPrev[i] = 0;
14608 +  }
14609 +  return 0;
14610 +}
14611 +
14612 +#define RH_FEAT_MASK ((1<<USB_PORT_FEAT_CONNECTION)|\
14613 +                     (1<<USB_PORT_FEAT_ENABLE)|\
14614 +                     (1<<USB_PORT_FEAT_SUSPEND)|\
14615 +                     (1<<USB_PORT_FEAT_RESET))
14616 +
14617 +/* Handle port status change interrupt (called from bottom part interrupt) */
14618 +void rh_port_status_change(__u16 port_reg[]) {
14619 +  int i;
14620 +  __u16 wChange;
14621 +
14622 +  for(i = 0; i < USB_ROOT_HUB_PORTS; i++) {
14623 +    /* Xor out changes since last read, masked for important flags */
14624 +    wChange = (port_reg[i] & RH_FEAT_MASK) ^ rh.wPortStatusPrev[i];
14625 +    /* Or changes together with (if any) saved changes */
14626 +    rh.wPortChange[i] |= wChange;
14627 +    /* Save new status */
14628 +    rh.wPortStatusPrev[i] = port_reg[i];
14629 +
14630 +    if(wChange) {
14631 +      rh_dbg("Interrupt port_status change port%d: %s  Current-status:%s\n", i+1,
14632 +            port_status_to_str(wChange),
14633 +            port_status_to_str(port_reg[i]));
14634 +    }
14635 +  }
14636 +}
14637 +
14638 +/* Construct port status change bitmap for the root hub */
14639 +static int rh_status_data_request(struct usb_hcd *hcd, char *buf)
14640 +{
14641 +  struct crisv10_hcd* crisv10_hcd = hcd_to_crisv10_hcd(hcd);
14642 +  unsigned int i;
14643 +
14644 +  DBFENTER;
14645 +  /*
14646 +   * corresponds to hub status change EP (USB 2.0 spec section 11.13.4)
14647 +   * return bitmap indicating ports with status change
14648 +   */
14649 +  *buf = 0;
14650 +  spin_lock(&crisv10_hcd->lock);
14651 +  for (i = 1; i <= crisv10_hcd->num_ports; i++) {
14652 +    if (rh.wPortChange[map_port(i)]) {
14653 +      *buf |= (1 << i);
14654 +      rh_dbg("rh_status_data_request, change on port %d: %s  Current Status: %s\n", i,
14655 +            port_status_to_str(rh.wPortChange[map_port(i)]),
14656 +            port_status_to_str(rh.wPortStatusPrev[map_port(i)]));
14657 +    }
14658 +  }
14659 +  spin_unlock(&crisv10_hcd->lock);
14660 +  DBFEXIT;
14661 +  return *buf == 0 ? 0 : 1;
14662 +}
14663 +
14664 +/* Handle a control request for the root hub (called from hcd_driver) */
14665 +static int rh_control_request(struct usb_hcd *hcd, 
14666 +                             u16 typeReq, 
14667 +                             u16 wValue, 
14668 +                             u16 wIndex,
14669 +                             char *buf, 
14670 +                             u16 wLength) {
14671 +
14672 +  struct crisv10_hcd *crisv10_hcd = hcd_to_crisv10_hcd(hcd);
14673 +  int retval = 0;
14674 +  int len;
14675 +  DBFENTER;
14676 +
14677 +  switch (typeReq) {
14678 +  case GetHubDescriptor:
14679 +    rh_dbg("GetHubDescriptor\n");
14680 +    len = min_t(unsigned int, sizeof rh_hub_descr, wLength);
14681 +    memcpy(buf, rh_hub_descr, len);
14682 +    buf[2] = crisv10_hcd->num_ports;
14683 +    break;
14684 +  case GetHubStatus:
14685 +    rh_dbg("GetHubStatus\n");
14686 +    len = min_t(unsigned int, sizeof rh_hub_status, wLength);
14687 +    memcpy(buf, &rh_hub_status, len);
14688 +    break;
14689 +  case GetPortStatus:
14690 +    if (!wIndex || wIndex > crisv10_hcd->num_ports)
14691 +      goto error;
14692 +    rh_dbg("GetportStatus, port:%d change:%s  status:%s\n", wIndex,
14693 +          port_status_to_str(rh.wPortChange[map_port(wIndex)]),
14694 +          port_status_to_str(rh.wPortStatusPrev[map_port(wIndex)]));
14695 +    *(u16 *) buf = cpu_to_le16(rh.wPortStatusPrev[map_port(wIndex)]);
14696 +    *(u16 *) (buf + 2) = cpu_to_le16(rh.wPortChange[map_port(wIndex)]);
14697 +    break;
14698 +  case SetHubFeature:
14699 +    rh_dbg("SetHubFeature\n");
14700 +  case ClearHubFeature:
14701 +    rh_dbg("ClearHubFeature\n");
14702 +    switch (wValue) {
14703 +    case C_HUB_OVER_CURRENT:
14704 +    case C_HUB_LOCAL_POWER:
14705 +      rh_warn("Not implemented hub request:%d \n", typeReq);
14706 +      /* not implemented */
14707 +      break;
14708 +    default:
14709 +      goto error;
14710 +    }
14711 +    break;
14712 +  case SetPortFeature:
14713 +    if (!wIndex || wIndex > crisv10_hcd->num_ports)
14714 +      goto error;
14715 +    if(rh_set_port_feature(map_port(wIndex), wValue))
14716 +      goto error;
14717 +    break;
14718 +  case ClearPortFeature:
14719 +    if (!wIndex || wIndex > crisv10_hcd->num_ports)
14720 +      goto error;
14721 +    if(rh_clear_port_feature(map_port(wIndex), wValue))
14722 +      goto error;
14723 +    break;
14724 +  default:
14725 +    rh_warn("Unknown hub request: %d\n", typeReq);
14726 +  error:
14727 +    retval = -EPIPE;
14728 +  }
14729 +  DBFEXIT;
14730 +  return retval;
14731 +}
14732 +
14733 +int rh_set_port_feature(__u8 bPort, __u16 wFeature) {
14734 +  __u8 bUsbCommand = 0;
14735 +  switch(wFeature) {
14736 +  case USB_PORT_FEAT_RESET:
14737 +    rh_dbg("SetPortFeature: reset\n");
14738 +    bUsbCommand |= IO_STATE(R_USB_COMMAND, port_cmd, reset);
14739 +    goto set;
14740 +    break;
14741 +  case USB_PORT_FEAT_SUSPEND:
14742 +    rh_dbg("SetPortFeature: suspend\n");
14743 +    bUsbCommand |= IO_STATE(R_USB_COMMAND, port_cmd, suspend);
14744 +    goto set;
14745 +    break;
14746 +  case USB_PORT_FEAT_POWER:
14747 +    rh_dbg("SetPortFeature: power\n");
14748 +    break;
14749 +  case USB_PORT_FEAT_C_CONNECTION:
14750 +    rh_dbg("SetPortFeature: c_connection\n");
14751 +    break;
14752 +  case USB_PORT_FEAT_C_RESET:
14753 +    rh_dbg("SetPortFeature: c_reset\n");
14754 +    break;
14755 +  case USB_PORT_FEAT_C_OVER_CURRENT:
14756 +    rh_dbg("SetPortFeature: c_over_current\n");
14757 +    break;
14758 +
14759 +  set:
14760 +    /* Select which port via the port_sel field */
14761 +    bUsbCommand |= IO_FIELD(R_USB_COMMAND, port_sel, bPort+1);
14762 +
14763 +    /* Make sure the controller isn't busy. */
14764 +    crisv10_ready_wait();
14765 +    /* Send out the actual command to the USB controller */
14766 +    *R_USB_COMMAND = bUsbCommand;
14767 +
14768 +    /* If port reset then also bring USB controller into running state */
14769 +    if(wFeature == USB_PORT_FEAT_RESET) {
14770 +      /* Wait a while for controller to first become started after port reset */
14771 +      udelay(12000); /* 12ms blocking wait */
14772 +      
14773 +      /* Make sure the controller isn't busy. */
14774 +      crisv10_ready_wait();
14775 +
14776 +      /* If all enabled ports were disabled the host controller goes down into
14777 +        started mode, so we need to bring it back into the running state.
14778 +        (This is safe even if it's already in the running state.) */
14779 +      *R_USB_COMMAND =
14780 +       IO_STATE(R_USB_COMMAND, port_sel, nop) |
14781 +       IO_STATE(R_USB_COMMAND, port_cmd, reset) |
14782 +       IO_STATE(R_USB_COMMAND, ctrl_cmd, host_run);
14783 +    }
14784 +
14785 +    break;
14786 +  default:
14787 +    rh_dbg("SetPortFeature: unknown feature\n");
14788 +    return -1;
14789 +  }
14790 +  return 0;
14791 +}
14792 +
14793 +int rh_clear_port_feature(__u8 bPort, __u16 wFeature) {
14794 +  switch(wFeature) {
14795 +  case USB_PORT_FEAT_ENABLE:
14796 +    rh_dbg("ClearPortFeature: enable\n");
14797 +    rh_disable_port(bPort);
14798 +    break;
14799 +  case USB_PORT_FEAT_SUSPEND:
14800 +    rh_dbg("ClearPortFeature: suspend\n");
14801 +    break;
14802 +  case USB_PORT_FEAT_POWER:
14803 +    rh_dbg("ClearPortFeature: power\n");
14804 +    break;
14805 +
14806 +  case USB_PORT_FEAT_C_ENABLE:
14807 +    rh_dbg("ClearPortFeature: c_enable\n");
14808 +    goto clear;
14809 +  case USB_PORT_FEAT_C_SUSPEND:
14810 +    rh_dbg("ClearPortFeature: c_suspend\n");
14811 +    goto clear;
14812 +  case USB_PORT_FEAT_C_CONNECTION:
14813 +    rh_dbg("ClearPortFeature: c_connection\n");
14814 +    goto clear;
14815 +  case USB_PORT_FEAT_C_OVER_CURRENT:
14816 +    rh_dbg("ClearPortFeature: c_over_current\n");
14817 +    goto clear;
14818 +  case USB_PORT_FEAT_C_RESET:
14819 +    rh_dbg("ClearPortFeature: c_reset\n");
14820 +    goto clear;
14821 +  clear:
14822 +    rh.wPortChange[bPort] &= ~(1 << (wFeature - 16));
14823 +    break;
14824 +  default:
14825 +    rh_dbg("ClearPortFeature: unknown feature\n");
14826 +    return -1;
14827 +  }
14828 +  return 0;
14829 +}
14830 +
14831 +
14832 +#ifdef CONFIG_PM
14833 +/* Handle a suspend request for the root hub (called from hcd_driver) */
14834 +static int rh_suspend_request(struct usb_hcd *hcd)
14835 +{
14836 +  return 0; /* no-op for now */
14837 +}
14838 +
14839 +/* Handle a resume request for the root hub (called from hcd_driver) */
14840 +static int rh_resume_request(struct usb_hcd *hcd)
14841 +{
14842 +  return 0; /* no-op for now */
14843 +}
14844 +#endif /* CONFIG_PM */
14845 +
14846 +
14847 +
14848 +/* Wrapper function for workaround port disable registers in USB controller  */
14849 +static void rh_disable_port(unsigned int port) {
14850 +  volatile int timeout = 10000;
14851 +  volatile char* usb_portx_disable;
14852 +  switch(port) {
14853 +  case 0:
14854 +    usb_portx_disable = R_USB_PORT1_DISABLE;
14855 +    break;
14856 +  case 1:
14857 +    usb_portx_disable = R_USB_PORT2_DISABLE;
14858 +    break;
14859 +  default:
14860 +    /* Invalid port index */
14861 +    return;
14862 +  }
14863 +  /* Set disable flag in special register  */
14864 +  *usb_portx_disable = IO_STATE(R_USB_PORT1_DISABLE, disable, yes);
14865 +  /* Wait until not enabled anymore */
14866 +  while((rh.wPortStatusPrev[port] &
14867 +       IO_STATE(R_USB_RH_PORT_STATUS_1, enabled, yes)) &&
14868 +       (timeout-- > 0));
14869 +  if(timeout == 0) {
14870 +    warn("Timeout while waiting for port %d to become disabled\n", port);
14871 +  }
14872 +  /* clear disable flag in special register  */
14873 +  *usb_portx_disable = IO_STATE(R_USB_PORT1_DISABLE, disable, no);
14874 +  rh_info("Physical port %d disabled\n", port+1);
14875 +}
14876 +
14877 +
14878 +/******************************************************************/
14879 +/* Transfer Controller (TC) functions                             */
14880 +/******************************************************************/
14881 +
14882 +/* FIXME: Should RX_BUF_SIZE be a config option, or maybe we should adjust it
14883 +   dynamically?
14884 +   To adjust it dynamically we would have to get an interrupt when we reach
14885 +   the end of the rx descriptor list, or when we get close to the end, and
14886 +   then allocate more descriptors. */
14887 +#define NBR_OF_RX_DESC     512
14888 +#define RX_DESC_BUF_SIZE   1024
14889 +#define RX_BUF_SIZE        (NBR_OF_RX_DESC * RX_DESC_BUF_SIZE)
14890 +
14891 +
14892 +/* Local variables for Transfer Controller */
14893 +/* --------------------------------------- */
14894 +
14895 +/* This is a circular (double-linked) list of the active urbs for each epid.
14896 +   The head is never removed, and new urbs are linked onto the list as
14897 +   urb_entry_t elements. Don't reference urb_list directly; use the wrapper
14898 +   functions instead (which includes spin_locks) */
14899 +static struct list_head urb_list[NBR_OF_EPIDS];
14900 +
14901 +/* Read about the need and usage of this lock in submit_ctrl_urb. */
14902 +/* Lock for URB lists for each EPID */
14903 +static spinlock_t urb_list_lock;
14904 +
14905 +/* Lock for EPID array register (R_USB_EPT_x) in Etrax */
14906 +static spinlock_t etrax_epid_lock;
14907 +
14908 +/* Lock for dma8 sub0 handling */
14909 +static spinlock_t etrax_dma8_sub0_lock;
14910 +
14911 +/* DMA IN cache bug. Align the DMA IN buffers to 32 bytes, i.e. a cache line.
14912 +   Since RX_DESC_BUF_SIZE (1024) is a multiple of 32, all rx buffers will be
14913 +   cache aligned. */
14914 +static volatile unsigned char RxBuf[RX_BUF_SIZE] __attribute__ ((aligned (32)));
14915 +static volatile struct USB_IN_Desc RxDescList[NBR_OF_RX_DESC] __attribute__ ((aligned (4)));
14916 +
14917 +/* Pointers into RxDescList. */
14918 +static volatile struct USB_IN_Desc *myNextRxDesc;
14919 +static volatile struct USB_IN_Desc *myLastRxDesc;
14920 +
14921 +/* A zout transfer makes a memory access at the address of its buf pointer,
14922 +   which means that setting this buf pointer to 0 will cause an access to the
14923 +   flash. In addition to this, setting sw_len to 0 results in a 16/32 bytes
14924 +   (depending on DMA burst size) transfer.
14925 +   Instead, we set it to 1, and point it to this buffer. */
14926 +static int zout_buffer[4] __attribute__ ((aligned (4)));
14927 +
14928 +/* Cache for allocating new EP and SB descriptors. */
14929 +static kmem_cache_t *usb_desc_cache;
14930 +
14931 +/* Cache for the data allocated in the isoc descr top half. */
14932 +static kmem_cache_t *isoc_compl_cache;
14933 +
14934 +/* Cache for the data allocated when delayed finishing of URBs */
14935 +static kmem_cache_t *later_data_cache;
14936 +
14937 +
14938 +/* Counter to keep track of how many Isoc EPs we have set up. Used to enable
14939 +   and disable iso_eof interrupt. We only need these interrupts when we have
14940 +   Isoc data endpoints (consumes CPU cycles).
14941 +   FIXME: This could be more fine-grained, so this interrupt is only enabled
14942 +   when an In Isoc URB without the URB_ISO_ASAP flag is queued. */
14943 +static int isoc_epid_counter;
14944 +
14945 +/* Protecting wrapper functions for R_USB_EPT_x */
14946 +/* -------------------------------------------- */
14947 +static inline void etrax_epid_set(__u8 index, __u32 data) {
14948 +  unsigned long flags;
14949 +  spin_lock_irqsave(&etrax_epid_lock, flags);
14950 +  *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, index);
14951 +  nop();
14952 +  *R_USB_EPT_DATA = data;
14953 +  spin_unlock_irqrestore(&etrax_epid_lock, flags);
14954 +}
14955 +
14956 +static inline void etrax_epid_clear_error(__u8 index) {
14957 +  unsigned long flags;
14958 +  spin_lock_irqsave(&etrax_epid_lock, flags);
14959 +  *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, index);
14960 +  nop();
14961 +  *R_USB_EPT_DATA &=
14962 +    ~(IO_MASK(R_USB_EPT_DATA, error_count_in) |
14963 +      IO_MASK(R_USB_EPT_DATA, error_count_out) |
14964 +      IO_MASK(R_USB_EPT_DATA, error_code));
14965 +  spin_unlock_irqrestore(&etrax_epid_lock, flags);
14966 +}
14967 +
14968 +static inline void etrax_epid_set_toggle(__u8 index, __u8 dirout,
14969 +                                             __u8 toggle) {
14970 +  unsigned long flags;
14971 +  spin_lock_irqsave(&etrax_epid_lock, flags);
14972 +  *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, index);
14973 +  nop();
14974 +  if(dirout) {
14975 +    *R_USB_EPT_DATA &= ~IO_MASK(R_USB_EPT_DATA, t_out);
14976 +    *R_USB_EPT_DATA |= IO_FIELD(R_USB_EPT_DATA, t_out, toggle);
14977 +  } else {
14978 +    *R_USB_EPT_DATA &= ~IO_MASK(R_USB_EPT_DATA, t_in);
14979 +    *R_USB_EPT_DATA |= IO_FIELD(R_USB_EPT_DATA, t_in, toggle);
14980 +  }
14981 +  spin_unlock_irqrestore(&etrax_epid_lock, flags);
14982 +}
14983 +
14984 +static inline __u8 etrax_epid_get_toggle(__u8 index, __u8 dirout) {
14985 +  unsigned long flags;
14986 +  __u8 toggle;
14987 +  spin_lock_irqsave(&etrax_epid_lock, flags);
14988 +  *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, index);
14989 +  nop();
14990 +  if (dirout) {
14991 +    toggle = IO_EXTRACT(R_USB_EPT_DATA, t_out, *R_USB_EPT_DATA);
14992 +  } else {
14993 +    toggle = IO_EXTRACT(R_USB_EPT_DATA, t_in, *R_USB_EPT_DATA);
14994 +  }
14995 +  spin_unlock_irqrestore(&etrax_epid_lock, flags);
14996 +  return toggle;
14997 +}
14998 +
14999 +
15000 +static inline __u32 etrax_epid_get(__u8 index) {
15001 +  unsigned long flags;
15002 +  __u32 data;
15003 +  spin_lock_irqsave(&etrax_epid_lock, flags);
15004 +  *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, index);
15005 +  nop();
15006 +  data = *R_USB_EPT_DATA;
15007 +  spin_unlock_irqrestore(&etrax_epid_lock, flags);
15008 +  return data;
15009 +}
15010 +
15011 +
15012 +
15013 +
15014 +/* Main functions for Transfer Controller */
15015 +/* -------------------------------------- */
15016 +
15017 +/* Init structs, memories and lists used by Transfer Controller */
15018 +int tc_init(struct usb_hcd *hcd) {
15019 +  int i;
15020 +  /* Clear software state info for all epids */
15021 +  memset(epid_state, 0, sizeof(struct etrax_epid) * NBR_OF_EPIDS);
15022 +
15023 +  /* Set Invalid and Dummy as being in use and disabled */
15024 +  epid_state[INVALID_EPID].inuse = 1;
15025 +  epid_state[DUMMY_EPID].inuse = 1;
15026 +  epid_state[INVALID_EPID].disabled = 1;
15027 +  epid_state[DUMMY_EPID].disabled = 1;
15028 +
15029 +  /* Clear counter for how many Isoc epids we have set up */
15030 +  isoc_epid_counter = 0;
15031 +
15032 +  /* Initialize the urb list by initiating a head for each list.
15033 +     Also reset the list holding the active URB for each epid */
15034 +  for (i = 0; i < NBR_OF_EPIDS; i++) {
15035 +    INIT_LIST_HEAD(&urb_list[i]);
15036 +    activeUrbList[i] = NULL;
15037 +  }
15038 +
15039 +  /* Init lock for URB lists */
15040 +  spin_lock_init(&urb_list_lock);
15041 +  /* Init lock for Etrax R_USB_EPT register */
15042 +  spin_lock_init(&etrax_epid_lock);
15043 +  /* Init lock for Etrax dma8 sub0 handling */
15044 +  spin_lock_init(&etrax_dma8_sub0_lock);
15045 +
15046 +  /* We use kmem_cache_* to make sure that all DMA desc. are dword aligned */
15047 +
15048 +  /* Note that we specify sizeof(struct USB_EP_Desc) as the size, but also
15049 +     allocate SB descriptors from this cache. This is ok since
15050 +     sizeof(struct USB_EP_Desc) == sizeof(struct USB_SB_Desc). */
15051 +  usb_desc_cache = kmem_cache_create("usb_desc_cache",
15052 +                                    sizeof(struct USB_EP_Desc), 0,
15053 +                                    SLAB_HWCACHE_ALIGN, 0, 0);
15054 +  if(usb_desc_cache == NULL) {
15055 +    return -ENOMEM;
15056 +  }
15057 +
15058 +  /* Create slab cache for speedy allocation of memory for isoc bottom-half
15059 +     interrupt handling */
15060 +  isoc_compl_cache =
15061 +    kmem_cache_create("isoc_compl_cache",
15062 +                     sizeof(struct crisv10_isoc_complete_data),
15063 +                     0, SLAB_HWCACHE_ALIGN, 0, 0);
15064 +  if(isoc_compl_cache == NULL) {
15065 +    return -ENOMEM;
15066 +  }
15067 +
15068 +  /* Create slab cache for speedy allocation of memory for later URB finish
15069 +     struct */
15070 +  later_data_cache =
15071 +    kmem_cache_create("later_data_cache",
15072 +                     sizeof(struct urb_later_data),
15073 +                     0, SLAB_HWCACHE_ALIGN, 0, 0);
15074 +  if(later_data_cache == NULL) {
15075 +    return -ENOMEM;
15076 +  }
15077 +
15078 +
15079 +  /* Initiate the bulk start timer. */
15080 +  init_timer(&bulk_start_timer);
15081 +  bulk_start_timer.expires = jiffies + BULK_START_TIMER_INTERVAL;
15082 +  bulk_start_timer.function = tc_bulk_start_timer_func;
15083 +  add_timer(&bulk_start_timer);
15084 +
15085 +
15086 +  /* Initiate the bulk eot timer. */
15087 +  init_timer(&bulk_eot_timer);
15088 +  bulk_eot_timer.expires = jiffies + BULK_EOT_TIMER_INTERVAL;
15089 +  bulk_eot_timer.function = tc_bulk_eot_timer_func;
15090 +  bulk_eot_timer.data = (unsigned long)hcd;
15091 +  add_timer(&bulk_eot_timer);
15092 +
15093 +  return 0;
15094 +}
15095 +
15096 +/* Uninitialize all resources used by Transfer Controller */
15097 +void tc_destroy(void) {
15098 +
15099 +  /* Destroy all slab cache */
15100 +  kmem_cache_destroy(usb_desc_cache);
15101 +  kmem_cache_destroy(isoc_compl_cache);
15102 +  kmem_cache_destroy(later_data_cache);
15103 +
15104 +  /* Remove timers */
15105 +  del_timer(&bulk_start_timer);
15106 +  del_timer(&bulk_eot_timer);
15107 +}
15108 +
15109 +static void restart_dma8_sub0(void) {
15110 +  unsigned long flags;
15111 +  spin_lock_irqsave(&etrax_dma8_sub0_lock, flags);
15112 +  /* Verify that the dma is not running */
15113 +  if ((*R_DMA_CH8_SUB0_CMD & IO_MASK(R_DMA_CH8_SUB0_CMD, cmd)) == 0) {
15114 +    struct USB_EP_Desc *ep = (struct USB_EP_Desc *)phys_to_virt(*R_DMA_CH8_SUB0_EP);
15115 +    while (DUMMY_EPID == IO_EXTRACT(USB_EP_command, epid, ep->command)) {
15116 +      ep = (struct USB_EP_Desc *)phys_to_virt(ep->next);
15117 +    }
15118 +    /* Advance the DMA to the next EP descriptor that is not a DUMMY_EPID.
15119 +     * ep->next is already a physical address; no need for a virt_to_phys. */
15120 +    *R_DMA_CH8_SUB0_EP = ep->next;
15121 +    /* Restart the DMA */
15122 +    *R_DMA_CH8_SUB0_CMD = IO_STATE(R_DMA_CH8_SUB0_CMD, cmd, start);
15123 +  }
15124 +  spin_unlock_irqrestore(&etrax_dma8_sub0_lock, flags);
15125 +}
15126 +
15127 +/* queue an URB with the transfer controller (called from hcd_driver) */
15128 +static int tc_urb_enqueue(struct usb_hcd *hcd, 
15129 +                         struct usb_host_endpoint *ep,
15130 +                         struct urb *urb, 
15131 +                         gfp_t mem_flags) {
15132 +  int epid;
15133 +  int retval;
15134 +  int bustime = 0;
15135 +  int maxpacket;
15136 +  unsigned long flags;
15137 +  struct crisv10_urb_priv *urb_priv;
15138 +  struct crisv10_hcd* crisv10_hcd = hcd_to_crisv10_hcd(hcd);
15139 +  DBFENTER;
15140 +
15141 +  if(!(crisv10_hcd->running)) {
15142 +    /* The USB Controller is not running, probably because no device is 
15143 +       attached. No idea to enqueue URBs then */
15144 +    tc_warn("Rejected enqueueing of URB:0x%x because no dev attached\n",
15145 +           (unsigned int)urb);
15146 +    return -ENOENT;
15147 +  }
15148 +
15149 +  maxpacket = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
15150 +  /* Special case check for In Isoc transfers. Specification states that each
15151 +     In Isoc transfer consists of one packet and therefore it should fit into
15152 +     the transfer-buffer of an URB.
15153 +     We do the check here to be sure (an invalid scenario can be produced with
15154 +     parameters to the usbtest suite) */
15155 +  if(usb_pipeisoc(urb->pipe) && usb_pipein(urb->pipe) &&
15156 +     (urb->transfer_buffer_length < maxpacket)) {
15157 +    tc_err("Submit In Isoc URB with buffer length:%d to pipe with maxpacketlen: %d\n", urb->transfer_buffer_length, maxpacket);
15158 +    return -EMSGSIZE;
15159 +  }
15160 +
15161 +  /* Check if there is enough bandwidth for periodic transfer  */
15162 +  if(usb_pipeint(urb->pipe) || usb_pipeisoc(urb->pipe)) {
15163 +    /* only check (and later claim) if not already claimed */
15164 +    if (urb->bandwidth == 0) {
15165 +      bustime = usb_check_bandwidth(urb->dev, urb);
15166 +      if (bustime < 0) {
15167 +       tc_err("Not enough periodic bandwidth\n");
15168 +       return -ENOSPC;
15169 +      }
15170 +    }
15171 +  }
15172 +
15173 +  /* Check if there is a epid for URBs destination, if not this function
15174 +     set up one. */
15175 +  epid = tc_setup_epid(ep, urb, mem_flags);
15176 +  if (epid < 0) {
15177 +    tc_err("Failed setup epid:%d for URB:0x%x\n", epid, (unsigned int)urb);
15178 +    DBFEXIT;
15179 +    return -ENOMEM;
15180 +  }
15181 +
15182 +  if(urb == activeUrbList[epid]) {
15183 +    tc_err("Resubmition of allready active URB:0x%x\n", (unsigned int)urb);
15184 +    return -ENXIO;
15185 +  }
15186 +
15187 +  if(urb_list_entry(urb, epid)) {
15188 +    tc_err("Resubmition of allready queued URB:0x%x\n", (unsigned int)urb);
15189 +    return -ENXIO;
15190 +  }
15191 +
15192 +  /* If we have actively flagged the endpoint as disabled then refuse submission */
15193 +  if(epid_state[epid].disabled) {
15194 +    return -ENOENT;
15195 +  }
15196 +
15197 +  /* Allocate and init HC-private data for URB */
15198 +  if(urb_priv_create(hcd, urb, epid, mem_flags) != 0) {
15199 +    DBFEXIT;
15200 +    return -ENOMEM;
15201 +  }
15202 +  urb_priv = urb->hcpriv;
15203 +
15204 +  tc_dbg("Enqueue URB:0x%x[%d] epid:%d (%s) bufflen:%d\n",
15205 +        (unsigned int)urb, urb_priv->urb_num, epid,
15206 +        pipe_to_str(urb->pipe), urb->transfer_buffer_length);
15207 +
15208 +  /* Create and link SBs required for this URB */
15209 +  retval = create_sb_for_urb(urb, mem_flags);
15210 +  if(retval != 0) {
15211 +    tc_err("Failed to create SBs for URB:0x%x[%d]\n", (unsigned int)urb,
15212 +          urb_priv->urb_num);
15213 +    urb_priv_free(hcd, urb);
15214 +    DBFEXIT;
15215 +    return retval;
15216 +  }
15217 +
15218 +  /* Init intr EP pool if this URB is a INTR transfer. This pool is later
15219 +     used when inserting EPs in the TxIntrEPList. We do the alloc here
15220 +     so we can't run out of memory later */
15221 +  if(usb_pipeint(urb->pipe)) {
15222 +    retval = init_intr_urb(urb, mem_flags);
15223 +    if(retval != 0) {
15224 +      tc_warn("Failed to init Intr URB\n");
15225 +      urb_priv_free(hcd, urb);
15226 +      DBFEXIT;
15227 +      return retval;
15228 +    }
15229 +  }
15230 +
15231 +  /* Disable other access when inserting USB */
15232 +  local_irq_save(flags);
15233 +
15234 +  /* Claim bandwidth, if needed */
15235 +  if(bustime) {
15236 +    usb_claim_bandwidth(urb->dev, urb, bustime, 0);
15237 +  }
15238 +  
15239 +  /* Add URB to EP queue */
15240 +  urb_list_add(urb, epid, mem_flags);
15241 +
15242 +  if(usb_pipeisoc(urb->pipe)) {
15243 +    /* Special processing of Isoc URBs. */
15244 +    tc_dma_process_isoc_urb(urb);
15245 +  } else {
15246 +    /* Process EP queue for rest of the URB types (Bulk, Ctrl, Intr) */
15247 +    tc_dma_process_queue(epid);
15248 +  }
15249 +
15250 +  local_irq_restore(flags);
15251 +
15252 +  DBFEXIT;
15253 +  return 0;
15254 +}
15255 +
15256 +/* remove an URB from the transfer controller queues (called from hcd_driver)*/
15257 +static int tc_urb_dequeue(struct usb_hcd *hcd, struct urb *urb) {
15258 +  struct crisv10_urb_priv *urb_priv;
15259 +  unsigned long flags;
15260 +  int epid;
15261 +
15262 +  DBFENTER;
15263 +  /* Disable interrupts here since a descriptor interrupt for the isoc epid
15264 +     will modify the sb list.  This could possibly be done more granular, but
15265 +     urb_dequeue should not be used frequently anyway.
15266 +  */
15267 +  local_irq_save(flags);
15268 +
15269 +  urb_priv = urb->hcpriv;
15270 +
15271 +  if (!urb_priv) {
15272 +    /* This happens if a device driver calls unlink on an urb that
15273 +       was never submitted (lazy driver) or if the urb was completed
15274 +       while dequeue was being called. */
15275 +    tc_warn("Dequeing of not enqueued URB:0x%x\n", (unsigned int)urb);
15276 +    local_irq_restore(flags);
15277 +    return 0;
15278 +  }
15279 +  epid = urb_priv->epid;
15280 +
15281 +  tc_warn("Dequeing %s URB:0x%x[%d] (%s %s epid:%d) status:%d %s\n",
15282 +         (urb == activeUrbList[epid]) ? "active" : "queued",
15283 +         (unsigned int)urb, urb_priv->urb_num, str_dir(urb->pipe),
15284 +         str_type(urb->pipe), epid, urb->status,
15285 +         (urb_priv->later_data) ? "later-sched" : "");
15286 +
15287 +  /* For Bulk, Ctrl and Intr only one URB is active at a time. So any URB
15288 +     that isn't active can be dequeued by just removing it from the queue */
15289 +  if(usb_pipebulk(urb->pipe) || usb_pipecontrol(urb->pipe) ||
15290 +     usb_pipeint(urb->pipe)) {
15291 +
15292 +    /* Check if the URB hasn't gone further than the queue */
15293 +    if(urb != activeUrbList[epid]) {
15294 +      ASSERT(urb_priv->later_data == NULL);
15295 +      tc_warn("Dequeing URB:0x%x[%d] (%s %s epid:%d) from queue"
15296 +             " (not active)\n", (unsigned int)urb, urb_priv->urb_num,
15297 +             str_dir(urb->pipe), str_type(urb->pipe), epid);
15298 +      
15299 +      /* Finish the URB with error status from USB core */
15300 +      tc_finish_urb(hcd, urb, urb->status);
15301 +      local_irq_restore(flags);
15302 +      return 0;
15303 +    }
15304 +  }
15305 +
15306 +  /* Set URB status to Unlink for handling when interrupt comes. */
15307 +  urb_priv->urb_state = UNLINK;
15308 +
15309 +  /* Differentiate dequeueing of Bulk and Ctrl from Isoc and Intr */
15310 +  switch(usb_pipetype(urb->pipe)) {
15311 +  case PIPE_BULK:
15312 +    /* Check if EP still is enabled */
15313 +    if (TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
15314 +      /* The EP was enabled, disable it. */
15315 +      TxBulkEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
15316 +    }
15317 +    /* Kicking dummy list out of the party. */
15318 +    TxBulkEPList[epid].next = virt_to_phys(&TxBulkEPList[(epid + 1) % NBR_OF_EPIDS]);
15319 +    break;
15320 +  case PIPE_CONTROL:
15321 +    /* Check if EP still is enabled */
15322 +    if (TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
15323 +      /* The EP was enabled, disable it. */
15324 +      TxCtrlEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
15325 +    }
15326 +    break;
15327 +  case PIPE_ISOCHRONOUS:
15328 +    /* Disabling, busy-wait and unlinking of Isoc SBs will be done in
15329 +       finish_isoc_urb(), because there might be the case where an URB is
15330 +       dequeued while other valid URBs are still waiting */
15331 +
15332 +    /* Check if In Isoc EP still is enabled */
15333 +    if (TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
15334 +      /* The EP was enabled, disable it. */
15335 +      TxIsocEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
15336 +    }
15337 +    break;
15338 +  case PIPE_INTERRUPT:
15339 +    /* Special care is taken for interrupt URBs. EPs are unlinked in
15340 +       tc_finish_urb */
15341 +    break;
15342 +  default:
15343 +    break;
15344 +  }
15345 +
15346 +  /* Asynchronous unlink, finish the URB later from scheduled or other
15347 +     event (data finished, error) */
15348 +  tc_finish_urb_later(hcd, urb, urb->status);
15349 +
15350 +  local_irq_restore(flags);
15351 +  DBFEXIT;
15352 +  return 0;
15353 +}
15354 +
15355 +
15356 +static void tc_sync_finish_epid(struct usb_hcd *hcd, int epid) {
15357 +  volatile int timeout = 10000;
15358 +  struct urb* urb;
15359 +  struct crisv10_urb_priv* urb_priv;
15360 +  unsigned long flags;
15361 +  
15362 +  volatile struct USB_EP_Desc *first_ep;  /* First EP in the list. */
15363 +  volatile struct USB_EP_Desc *curr_ep;   /* Current EP, the iterator. */
15364 +  volatile struct USB_EP_Desc *next_ep;   /* The EP after current. */
15365 +
15366 +  int type = epid_state[epid].type;
15367 +
15368 +  /* Setting this flag will cause enqueue() to return -ENOENT for new
15369 +     submissions on this endpoint and finish_urb() won't process the queue further */
15370 +  epid_state[epid].disabled = 1;
15371 +
15372 +  switch(type) {
15373 +  case PIPE_BULK:
15374 +    /* Check if EP still is enabled */
15375 +    if (TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
15376 +      /* The EP was enabled, disable it. */
15377 +      TxBulkEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
15378 +      tc_warn("sync_finish: Disabling EP for epid:%d\n", epid);
15379 +
15380 +      /* Do busy-wait until DMA not using this EP descriptor anymore */
15381 +      while((*R_DMA_CH8_SUB0_EP ==
15382 +            virt_to_phys(&TxBulkEPList[epid])) &&
15383 +           (timeout-- > 0));
15384 +      if(timeout == 0) {
15385 +       warn("Timeout while waiting for DMA-TX-Bulk to leave EP for"
15386 +            " epid:%d\n", epid);
15387 +      }
15388 +    }
15389 +    break;
15390 +
15391 +  case PIPE_CONTROL:
15392 +    /* Check if EP still is enabled */
15393 +    if (TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
15394 +      /* The EP was enabled, disable it. */
15395 +      TxCtrlEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
15396 +      tc_warn("sync_finish: Disabling EP for epid:%d\n", epid);
15397 +
15398 +      /* Do busy-wait until DMA not using this EP descriptor anymore */
15399 +      while((*R_DMA_CH8_SUB1_EP ==
15400 +            virt_to_phys(&TxCtrlEPList[epid])) &&
15401 +           (timeout-- > 0));
15402 +      if(timeout == 0) {
15403 +       warn("Timeout while waiting for DMA-TX-Ctrl to leave EP for"
15404 +            " epid:%d\n", epid);
15405 +      }
15406 +    }
15407 +    break;
15408 +
15409 +  case PIPE_INTERRUPT:
15410 +    local_irq_save(flags);
15411 +    /* Disable all Intr EPs belonging to epid */
15412 +    first_ep = &TxIntrEPList[0];
15413 +    curr_ep = first_ep;
15414 +    do {
15415 +      next_ep = (struct USB_EP_Desc *)phys_to_virt(curr_ep->next);
15416 +      if (IO_EXTRACT(USB_EP_command, epid, next_ep->command) == epid) {
15417 +       /* Disable EP */
15418 +       next_ep->command &= ~IO_MASK(USB_EP_command, enable);
15419 +      }
15420 +      curr_ep = phys_to_virt(curr_ep->next);
15421 +    } while (curr_ep != first_ep);
15422 +
15423 +    local_irq_restore(flags);
15424 +    break;
15425 +
15426 +  case PIPE_ISOCHRONOUS:
15427 +    /* Check if EP still is enabled */
15428 +    if (TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
15429 +      tc_warn("sync_finish: Disabling Isoc EP for epid:%d\n", epid);
15430 +      /* The EP was enabled, disable it. */
15431 +      TxIsocEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
15432 +      
15433 +      while((*R_DMA_CH8_SUB3_EP == virt_to_phys(&TxIsocEPList[epid])) &&
15434 +           (timeout-- > 0));
15435 +      if(timeout == 0) {
15436 +       warn("Timeout while waiting for DMA-TX-Isoc to leave EP for"
15437 +            " epid:%d\n", epid);
15438 +      }
15439 +    }
15440 +    break;
15441 +  }
15442 +
15443 +  local_irq_save(flags);
15444 +
15445 +  /* Finish if there is active URB for this endpoint */
15446 +  if(activeUrbList[epid] != NULL) {
15447 +    urb = activeUrbList[epid];
15448 +    urb_priv = urb->hcpriv;
15449 +    ASSERT(urb_priv);
15450 +    tc_warn("Sync finish %s URB:0x%x[%d] (%s %s epid:%d) status:%d %s\n",
15451 +           (urb == activeUrbList[epid]) ? "active" : "queued",
15452 +           (unsigned int)urb, urb_priv->urb_num, str_dir(urb->pipe),
15453 +           str_type(urb->pipe), epid, urb->status,
15454 +           (urb_priv->later_data) ? "later-sched" : "");
15455 +
15456 +    tc_finish_urb(hcd, activeUrbList[epid], -ENOENT);
15457 +    ASSERT(activeUrbList[epid] == NULL);
15458 +  }
15459 +
15460 +  /* Finish any queued URBs for this endpoint. There won't be any resubmissions
15461 +     because epid_disabled causes enqueue() to fail for this endpoint */
15462 +  while((urb = urb_list_first(epid)) != NULL) {
15463 +    urb_priv = urb->hcpriv;
15464 +    ASSERT(urb_priv);
15465 +
15466 +    tc_warn("Sync finish %s URB:0x%x[%d] (%s %s epid:%d) status:%d %s\n",
15467 +           (urb == activeUrbList[epid]) ? "active" : "queued",
15468 +           (unsigned int)urb, urb_priv->urb_num, str_dir(urb->pipe),
15469 +           str_type(urb->pipe), epid, urb->status,
15470 +           (urb_priv->later_data) ? "later-sched" : "");
15471 +
15472 +    tc_finish_urb(hcd, urb, -ENOENT);
15473 +  }
15474 +  epid_state[epid].disabled = 0;
15475 +  local_irq_restore(flags);
15476 +}
15477 +
15478 +/* free resources associated with an endpoint (called from hcd_driver) */
15479 +static void tc_endpoint_disable(struct usb_hcd *hcd, 
15480 +                               struct usb_host_endpoint *ep) {
15481 +  DBFENTER;
15482 +  /* Only free epid if it has been allocated. We get two endpoint_disable
15483 +     requests for ctrl endpoints so ignore the second one */
15484 +  if(ep->hcpriv != NULL) {
15485 +    struct crisv10_ep_priv *ep_priv = ep->hcpriv;
15486 +    int epid = ep_priv->epid;
15487 +    tc_warn("endpoint_disable ep:0x%x ep-priv:0x%x (%s) (epid:%d freed)\n",
15488 +          (unsigned int)ep, (unsigned int)ep->hcpriv,
15489 +          endpoint_to_str(&(ep->desc)), epid);
15490 +
15491 +    tc_sync_finish_epid(hcd, epid);
15492 +
15493 +    ASSERT(activeUrbList[epid] == NULL);
15494 +    ASSERT(list_empty(&urb_list[epid]));
15495 +
15496 +    tc_free_epid(ep);
15497 +  } else {
15498 +    tc_dbg("endpoint_disable ep:0x%x ep-priv:0x%x (%s)\n", (unsigned int)ep,
15499 +          (unsigned int)ep->hcpriv, endpoint_to_str(&(ep->desc)));
15500 +  }
15501 +  DBFEXIT;
15502 +}
15503 +
15504 +static void tc_finish_urb_later_proc(void *data) {
15505 +  unsigned long flags;
15506 +  struct urb_later_data* uld = (struct urb_later_data*)data;
15507 +  local_irq_save(flags);
15508 +  if(uld->urb == NULL) {
15509 +    late_dbg("Later finish of URB = NULL (allready finished)\n");
15510 +  } else {
15511 +    struct crisv10_urb_priv* urb_priv = uld->urb->hcpriv;
15512 +    ASSERT(urb_priv);
15513 +    if(urb_priv->urb_num == uld->urb_num) {
15514 +      late_dbg("Later finish of URB:0x%x[%d]\n", (unsigned int)(uld->urb),
15515 +              urb_priv->urb_num);
15516 +      if(uld->status != uld->urb->status) {
15517 +       errno_dbg("Later-finish URB with status:%d, later-status:%d\n",
15518 +                 uld->urb->status, uld->status);
15519 +      }
15520 +      if(uld != urb_priv->later_data) {
15521 +       panic("Scheduled uld not same as URBs uld\n");
15522 +      }
15523 +      tc_finish_urb(uld->hcd, uld->urb, uld->status);
15524 +    } else {
15525 +      late_warn("Ignoring later finish of URB:0x%x[%d]"
15526 +               ", urb_num doesn't match current URB:0x%x[%d]",
15527 +               (unsigned int)(uld->urb), uld->urb_num,
15528 +               (unsigned int)(uld->urb), urb_priv->urb_num);
15529 +    }
15530 +  }
15531 +  local_irq_restore(flags);
15532 +  kmem_cache_free(later_data_cache, uld);
15533 +}
15534 +
15535 +static void tc_finish_urb_later(struct usb_hcd *hcd, struct urb *urb,
15536 +                               int status) {
15537 +  struct crisv10_urb_priv *urb_priv = urb->hcpriv;
15538 +  struct urb_later_data* uld;
15539 +
15540 +  ASSERT(urb_priv);
15541 +
15542 +  if(urb_priv->later_data != NULL) {
15543 +    /* Later-finish already scheduled for this URB, just update the status to
15544 +       return when finishing later */
15545 +    errno_dbg("Later-finish schedule change URB status:%d with new"
15546 +             " status:%d\n", urb_priv->later_data->status, status);
15547 +    
15548 +    urb_priv->later_data->status = status;
15549 +    return;
15550 +  }
15551 +
15552 +  uld = kmem_cache_alloc(later_data_cache, SLAB_ATOMIC);
15553 +  ASSERT(uld);
15554 +
15555 +  uld->hcd = hcd;
15556 +  uld->urb = urb;
15557 +  uld->urb_num = urb_priv->urb_num;
15558 +  uld->status = status;
15559 +
15560 +  INIT_WORK(&uld->ws, tc_finish_urb_later_proc, uld);
15561 +  urb_priv->later_data = uld;
15562 +
15563 +  /* Schedule the finishing of the URB to happen later */
15564 +  schedule_delayed_work(&uld->ws, LATER_TIMER_DELAY);
15565 +}
15566 +
15567 +static void tc_finish_isoc_urb(struct usb_hcd *hcd, struct urb *urb,
15568 +                              int status);
15569 +
15570 +static void tc_finish_urb(struct usb_hcd *hcd, struct urb *urb, int status) {
15571 +  struct crisv10_hcd* crisv10_hcd = hcd_to_crisv10_hcd(hcd);
15572 +  struct crisv10_urb_priv *urb_priv = urb->hcpriv;
15573 +  int epid;
15574 +  char toggle;
15575 +  int urb_num;
15576 +
15577 +  DBFENTER;
15578 +  ASSERT(urb_priv != NULL);
15579 +  epid = urb_priv->epid;
15580 +  urb_num = urb_priv->urb_num;
15581 +
15582 +  if(urb != activeUrbList[epid]) {
15583 +    if(urb_list_entry(urb, epid)) {
15584 +      /* Remove this URB from the list. Only happens when URBs are finished
15585 +        before having been processed (dequeueing) */
15586 +      urb_list_del(urb, epid);
15587 +    } else {
15588 +      tc_warn("Finishing of URB:0x%x[%d] neither active or in queue for"
15589 +             " epid:%d\n", (unsigned int)urb, urb_num, epid);
15590 +    }
15591 +  }
15592 +
15593 +  /* Cancel any pending later-finish of this URB */
15594 +  if(urb_priv->later_data) {
15595 +    urb_priv->later_data->urb = NULL;
15596 +  }
15597 +
15598 +  /* For an IN pipe, we always set the actual length, regardless of whether
15599 +     there was an error or not (which means the device driver can use the data
15600 +     if it wants to). */
15601 +  if(usb_pipein(urb->pipe)) {
15602 +    urb->actual_length = urb_priv->rx_offset;
15603 +  } else {
15604 +    /* Set actual_length for OUT urbs also; the USB mass storage driver seems
15605 +       to want that. */
15606 +    if (status == 0 && urb->status == -EINPROGRESS) {
15607 +      urb->actual_length = urb->transfer_buffer_length;
15608 +    } else {
15609 +      /*  We wouldn't know of any partial writes if there was an error. */
15610 +      urb->actual_length = 0;
15611 +    }
15612 +  }
15613 +
15614 +
15615 +  /* URB status mangling */
15616 +  if(urb->status == -EINPROGRESS) {
15617 +    /* The USB core hasn't changed the status, let's set our finish status */
15618 +    urb->status = status;
15619 +
15620 +    if ((status == 0) && (urb->transfer_flags & URB_SHORT_NOT_OK) &&
15621 +       usb_pipein(urb->pipe) &&
15622 +       (urb->actual_length != urb->transfer_buffer_length)) {
15623 +      /* URB_SHORT_NOT_OK means that short reads (shorter than the endpoint's
15624 +        max length) is to be treated as an error. */
15625 +      errno_dbg("Finishing URB:0x%x[%d] with SHORT_NOT_OK flag and short"
15626 +               " data:%d\n", (unsigned int)urb, urb_num,
15627 +               urb->actual_length);
15628 +      urb->status = -EREMOTEIO;
15629 +    }
15630 +
15631 +    if(urb_priv->urb_state == UNLINK) {
15632 +      /* URB has been requested to be unlinked asynchronously */
15633 +      urb->status = -ECONNRESET;
15634 +      errno_dbg("Fixing unlink status of URB:0x%x[%d] to:%d\n",
15635 +               (unsigned int)urb, urb_num, urb->status);
15636 +    }
15637 +  } else {
15638 +    /* The USB Core wants to signal some error via the URB, pass it through */
15639 +  }
15640 +
15641 +  /* use completely different finish function for Isoc URBs */
15642 +  if(usb_pipeisoc(urb->pipe)) {
15643 +    tc_finish_isoc_urb(hcd, urb, status);
15644 +    return;
15645 +  }
15646 +
15647 +  /* Do special unlinking of EPs for Intr traffic */
15648 +  if(usb_pipeint(urb->pipe)) {
15649 +    tc_dma_unlink_intr_urb(urb);
15650 +  }
15651 +
15652 +  /* Release allocated bandwidth for periodic transfers */
15653 +  if(usb_pipeint(urb->pipe) || usb_pipeisoc(urb->pipe))
15654 +    usb_release_bandwidth(urb->dev, urb, 0);
15655 +
15656 +  /* This URB is active on EP */
15657 +  if(urb == activeUrbList[epid]) {
15658 +    /* We need to fiddle with the toggle bits because the hardware doesn't do
15659 +       it for us. */
15660 +    toggle = etrax_epid_get_toggle(epid, usb_pipeout(urb->pipe));
15661 +    usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
15662 +                 usb_pipeout(urb->pipe), toggle);
15663 +
15664 +    /* Checks for Ctrl and Bulk EPs */
15665 +    switch(usb_pipetype(urb->pipe)) {
15666 +    case PIPE_BULK:
15667 +      /* Check so Bulk EP really is disabled before finishing active URB  */
15668 +      ASSERT((TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) ==
15669 +            IO_STATE(USB_EP_command, enable, no));
15670 +      /* Disable sub-pointer for EP to avoid next tx_interrupt() to
15671 +        process Bulk EP. */
15672 +      TxBulkEPList[epid].sub = 0;
15673 +      /* No need to wait for the DMA before changing the next pointer.
15674 +        The modulo NBR_OF_EPIDS isn't actually necessary, since we will never use
15675 +        the last one (INVALID_EPID) for actual traffic. */
15676 +      TxBulkEPList[epid].next = 
15677 +       virt_to_phys(&TxBulkEPList[(epid + 1) % NBR_OF_EPIDS]);
15678 +      break;
15679 +    case PIPE_CONTROL:
15680 +      /* Check so Ctrl EP really is disabled before finishing active URB  */
15681 +      ASSERT((TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)) ==
15682 +            IO_STATE(USB_EP_command, enable, no));
15683 +      /* Disable sub-pointer for EP to avoid next tx_interrupt() to
15684 +        process Ctrl EP. */
15685 +      TxCtrlEPList[epid].sub = 0;
15686 +      break;
15687 +    }
15688 +  }
15689 +
15690 +  /* Free HC-private URB data*/
15691 +  urb_priv_free(hcd, urb);
15692 +
15693 +  if(urb->status) {
15694 +    errno_dbg("finish_urb (URB:0x%x[%d] %s %s) (data:%d) status:%d\n",
15695 +             (unsigned int)urb, urb_num, str_dir(urb->pipe),
15696 +             str_type(urb->pipe), urb->actual_length, urb->status);
15697 +  } else {
15698 +    tc_dbg("finish_urb (URB:0x%x[%d] %s %s) (data:%d) status:%d\n",
15699 +          (unsigned int)urb, urb_num, str_dir(urb->pipe),
15700 +          str_type(urb->pipe), urb->actual_length, urb->status);
15701 +  }
15702 +
15703 +  /* If we just finished an active URB, clear active pointer. */
15704 +  if (urb == activeUrbList[epid]) {
15705 +    /* Make URB not active on EP anymore */
15706 +    activeUrbList[epid] = NULL;
15707 +
15708 +    if(urb->status == 0) {
15709 +      /* URB finished successfully, process queue to see if there are any more
15710 +        URBs waiting before we call completion function.*/
15711 +      if(crisv10_hcd->running) {
15712 +       /* Only process queue if USB controller is running */
15713 +       tc_dma_process_queue(epid);
15714 +      } else {
15715 +       tc_warn("No processing of queue for epid:%d, USB Controller not"
15716 +               " running\n", epid);
15717 +      }
15718 +    }
15719 +  }
15720 +
15721 +  /*  Hand the URB from HCD to its USB device driver, using its completion
15722 +      functions */
15723 +  usb_hcd_giveback_urb (hcd, urb);
15724 +
15725 +  /* Check the queue once more if the URB returned with error, because we
15726 +     didn't do it before the completion function because the specification
15727 +     states that the queue should not restart until all it's unlinked
15728 +     URBs have been fully retired, with the completion functions run */
15729 +  if(crisv10_hcd->running) {
15730 +    /* Only process queue if USB controller is running */
15731 +    tc_dma_process_queue(epid);
15732 +  } else {
15733 +    tc_warn("No processing of queue for epid:%d, USB Controller not running\n",
15734 +           epid);
15735 +  }
15736 +
15737 +  DBFEXIT;
15738 +}
15739 +
15740 +static void tc_finish_isoc_urb(struct usb_hcd *hcd, struct urb *urb,
15741 +                              int status) {
15742 +  struct crisv10_urb_priv *urb_priv = urb->hcpriv;
15743 +  int epid, i;
15744 +  volatile int timeout = 10000;
15745 +
15746 +  ASSERT(urb_priv);
15747 +  epid = urb_priv->epid;
15748 +
15749 +  ASSERT(usb_pipeisoc(urb->pipe));
15750 +
15751 +  /* Set that all isoc packets have status and length set before
15752 +     completing the urb. */
15753 +  for (i = urb_priv->isoc_packet_counter; i < urb->number_of_packets; i++){
15754 +    urb->iso_frame_desc[i].actual_length = 0;
15755 +    urb->iso_frame_desc[i].status = -EPROTO;
15756 +  }
15757 +
15758 +  /* Check if the URB is currently active (done or error) */
15759 +  if(urb == activeUrbList[epid]) {
15760 +    /* Check if there are another In Isoc URB queued for this epid */
15761 +    if (!list_empty(&urb_list[epid])&& !epid_state[epid].disabled) {
15762 +      /* Move it from queue to active and mark it started so Isoc transfers
15763 +        won't be interrupted.
15764 +        All Isoc URBs data transfers are already added to DMA lists so we
15765 +        don't have to insert anything in DMA lists here. */
15766 +      activeUrbList[epid] = urb_list_first(epid);
15767 +      ((struct crisv10_urb_priv *)(activeUrbList[epid]->hcpriv))->urb_state =
15768 +       STARTED;
15769 +      urb_list_del(activeUrbList[epid], epid);
15770 +
15771 +      if(urb->status) {
15772 +       errno_dbg("finish_isoc_urb (URB:0x%x[%d] %s %s) (%d of %d packets)"
15773 +                 " status:%d, new waiting URB:0x%x[%d]\n",
15774 +                 (unsigned int)urb, urb_priv->urb_num, str_dir(urb->pipe),
15775 +                 str_type(urb->pipe), urb_priv->isoc_packet_counter,
15776 +                 urb->number_of_packets, urb->status,
15777 +                 (unsigned int)activeUrbList[epid],
15778 +                 ((struct crisv10_urb_priv *)(activeUrbList[epid]->hcpriv))->urb_num);
15779 +      }
15780 +
15781 +    } else { /* No other URB queued for this epid */
15782 +      if(urb->status) {
15783 +       errno_dbg("finish_isoc_urb (URB:0x%x[%d] %s %s) (%d of %d packets)"
15784 +                 " status:%d, no new URB waiting\n",
15785 +                 (unsigned int)urb, urb_priv->urb_num, str_dir(urb->pipe),
15786 +                 str_type(urb->pipe), urb_priv->isoc_packet_counter,
15787 +                 urb->number_of_packets, urb->status);
15788 +      }
15789 +
15790 +      /* Check if EP is still enabled, then shut it down. */
15791 +      if (TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
15792 +       isoc_dbg("Isoc EP enabled for epid:%d, disabling it\n", epid);
15793 +
15794 +       /* Should only occur for In Isoc EPs where SB isn't consumed. */
15795 +       ASSERT(usb_pipein(urb->pipe));
15796 +
15797 +       /* Disable it and wait for it to stop */
15798 +       TxIsocEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
15799 +       
15800 +       /* Ah, the luxury of busy-wait. */
15801 +       while((*R_DMA_CH8_SUB3_EP == virt_to_phys(&TxIsocEPList[epid])) &&
15802 +             (timeout-- > 0));
15803 +       if(timeout == 0) {
15804 +         warn("Timeout while waiting for DMA-TX-Isoc to leave EP for epid:%d\n", epid);
15805 +       }
15806 +      }
15807 +
15808 +      /* Unlink SB to say that epid is finished. */
15809 +      TxIsocEPList[epid].sub = 0;
15810 +      TxIsocEPList[epid].hw_len = 0;
15811 +
15812 +      /* No URB active for EP anymore */
15813 +      activeUrbList[epid] = NULL;
15814 +    }
15815 +  } else { /* Finishing of not active URB (queued up with SBs though) */
15816 +    isoc_warn("finish_isoc_urb (URB:0x%x %s) (%d of %d packets) status:%d,"
15817 +             " SB queued but not active\n",
15818 +             (unsigned int)urb, str_dir(urb->pipe),
15819 +             urb_priv->isoc_packet_counter, urb->number_of_packets,
15820 +             urb->status);
15821 +    if(usb_pipeout(urb->pipe)) {
15822 +      /* Finishing of not yet active Out Isoc URB needs unlinking of SBs. */
15823 +      struct USB_SB_Desc *iter_sb, *prev_sb, *next_sb;
15824 +
15825 +      iter_sb = TxIsocEPList[epid].sub ?
15826 +       phys_to_virt(TxIsocEPList[epid].sub) : 0;
15827 +      prev_sb = 0;
15828 +
15829 +      /* SB that is linked before this URBs first SB */
15830 +      while (iter_sb && (iter_sb != urb_priv->first_sb)) {
15831 +       prev_sb = iter_sb;
15832 +       iter_sb = iter_sb->next ? phys_to_virt(iter_sb->next) : 0;
15833 +      }
15834 +
15835 +      if (iter_sb == 0) {
15836 +       /* Unlink of the URB currently being transmitted. */
15837 +       prev_sb = 0;
15838 +       iter_sb = TxIsocEPList[epid].sub ? phys_to_virt(TxIsocEPList[epid].sub) : 0;
15839 +      }
15840 +
15841 +      while (iter_sb && (iter_sb != urb_priv->last_sb)) {
15842 +       iter_sb = iter_sb->next ? phys_to_virt(iter_sb->next) : 0;
15843 +      }
15844 +
15845 +      if (iter_sb) {
15846 +       next_sb = iter_sb->next ? phys_to_virt(iter_sb->next) : 0;
15847 +      } else {
15848 +       /* This should only happen if the DMA has completed
15849 +          processing the SB list for this EP while interrupts
15850 +          are disabled. */
15851 +       isoc_dbg("Isoc urb not found, already sent?\n");
15852 +       next_sb = 0;
15853 +      }
15854 +      if (prev_sb) {
15855 +       prev_sb->next = next_sb ? virt_to_phys(next_sb) : 0;
15856 +      } else {
15857 +       TxIsocEPList[epid].sub = next_sb ? virt_to_phys(next_sb) : 0;
15858 +      }
15859 +    }
15860 +  }
15861 +
15862 +  /* Free HC-private URB data*/
15863 +  urb_priv_free(hcd, urb);
15864 +
15865 +  usb_release_bandwidth(urb->dev, urb, 0);
15866 +
15867 +  /*  Hand the URB from HCD to its USB device driver, using its completion
15868 +      functions */
15869 +  usb_hcd_giveback_urb (hcd, urb);
15870 +}
15871 +
15872 +static __u32 urb_num = 0;
15873 +
15874 +/* allocate and initialize URB private data */
15875 +static int urb_priv_create(struct usb_hcd *hcd, struct urb *urb, int epid,
15876 +                          int mem_flags) {
15877 +  struct crisv10_urb_priv *urb_priv;
15878 +  
15879 +  urb_priv = kmalloc(sizeof *urb_priv, mem_flags);
15880 +  if (!urb_priv)
15881 +    return -ENOMEM;
15882 +  memset(urb_priv, 0, sizeof *urb_priv);
15883 +
15884 +  urb_priv->epid = epid;
15885 +  urb_priv->urb_state = NOT_STARTED;
15886 +
15887 +  urb->hcpriv = urb_priv;
15888 +  /* Assign URB a sequence number, and increment counter */
15889 +  urb_priv->urb_num = urb_num;
15890 +  urb_num++;
15891 +  return 0;
15892 +}
15893 +
15894 +/* free URB private data */
15895 +static void urb_priv_free(struct usb_hcd *hcd, struct urb *urb) {
15896 +  int i;
15897 +  struct crisv10_urb_priv *urb_priv = urb->hcpriv;
15898 +  ASSERT(urb_priv != 0);
15899 +
15900 +  /* Check it has any SBs linked that needs to be freed*/
15901 +  if(urb_priv->first_sb != NULL) {
15902 +    struct USB_SB_Desc *next_sb, *first_sb, *last_sb;
15903 +    int i = 0;
15904 +    first_sb = urb_priv->first_sb;
15905 +    last_sb = urb_priv->last_sb;
15906 +    ASSERT(last_sb);
15907 +    while(first_sb != last_sb) {
15908 +      next_sb = (struct USB_SB_Desc *)phys_to_virt(first_sb->next);
15909 +      kmem_cache_free(usb_desc_cache, first_sb);
15910 +      first_sb = next_sb;
15911 +      i++;
15912 +    }
15913 +    kmem_cache_free(usb_desc_cache, last_sb);
15914 +    i++;
15915 +  }
15916 +
15917 +  /* Check if it has any EPs in its Intr pool that also needs to be freed */
15918 +  if(urb_priv->intr_ep_pool_length > 0) {
15919 +    for(i = 0; i < urb_priv->intr_ep_pool_length; i++) {
15920 +      kfree(urb_priv->intr_ep_pool[i]);
15921 +    }
15922 +    /*
15923 +    tc_dbg("Freed %d EPs from URB:0x%x EP pool\n",
15924 +            urb_priv->intr_ep_pool_length, (unsigned int)urb);
15925 +    */
15926 +  }
15927 +
15928 +  kfree(urb_priv);
15929 +  urb->hcpriv = NULL;
15930 +}
15931 +
15932 +static int ep_priv_create(struct usb_host_endpoint *ep, int mem_flags) {
15933 +  struct crisv10_ep_priv *ep_priv;
15934 +  
15935 +  ep_priv = kmalloc(sizeof *ep_priv, mem_flags);
15936 +  if (!ep_priv)
15937 +    return -ENOMEM;
15938 +  memset(ep_priv, 0, sizeof *ep_priv);
15939 +
15940 +  ep->hcpriv = ep_priv;
15941 +  return 0;
15942 +}
15943 +
15944 +static void ep_priv_free(struct usb_host_endpoint *ep) {
15945 +  struct crisv10_ep_priv *ep_priv = ep->hcpriv;
15946 +  ASSERT(ep_priv);
15947 +  kfree(ep_priv);
15948 +  ep->hcpriv = NULL;
15949 +}
15950 +
15951 +/* EPID handling functions, managing EP-list in Etrax through wrappers */
15952 +/* ------------------------------------------------------------------- */
15953 +
15954 +/* Sets up a new EPID for an endpoint or returns existing if found */
15955 +static int tc_setup_epid(struct usb_host_endpoint *ep, struct urb *urb,
15956 +                        int mem_flags) {
15957 +  int epid;
15958 +  char devnum, endpoint, out_traffic, slow;
15959 +  int maxlen;
15960 +  __u32 epid_data;
15961 +  struct crisv10_ep_priv *ep_priv = ep->hcpriv;
15962 +  
15963 +  DBFENTER;
15964 +  
15965 +  /* Check if a valid epid already is setup for this endpoint */
15966 +  if(ep_priv != NULL) {
15967 +    return ep_priv->epid;
15968 +  }
15969 +
15970 +  /* We must find and initiate a new epid for this urb. */
15971 +  epid = tc_allocate_epid();
15972 +  
15973 +  if (epid == -1) {
15974 +    /* Failed to allocate a new epid. */
15975 +    DBFEXIT;
15976 +    return epid;
15977 +  }
15978 +  
15979 +  /* We now have a new epid to use. Claim it. */
15980 +  epid_state[epid].inuse = 1;
15981 +  
15982 +  /* Init private data for new endpoint */
15983 +  if(ep_priv_create(ep, mem_flags) != 0) {
15984 +    return -ENOMEM;
15985 +  }
15986 +  ep_priv = ep->hcpriv;
15987 +  ep_priv->epid = epid;
15988 +
15989 +  devnum = usb_pipedevice(urb->pipe);
15990 +  endpoint = usb_pipeendpoint(urb->pipe);
15991 +  slow = (urb->dev->speed == USB_SPEED_LOW);
15992 +  maxlen = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
15993 +
15994 +  if (usb_pipetype(urb->pipe) == PIPE_CONTROL) {
15995 +    /* We want both IN and OUT control traffic to be put on the same
15996 +       EP/SB list. */
15997 +    out_traffic = 1;
15998 +  } else {
15999 +    out_traffic = usb_pipeout(urb->pipe);
16000 +  }
16001 +    
16002 +  if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
16003 +    epid_data = IO_STATE(R_USB_EPT_DATA_ISO, valid, yes) |
16004 +      /* FIXME: Change any to the actual port? */
16005 +      IO_STATE(R_USB_EPT_DATA_ISO, port, any) |
16006 +      IO_FIELD(R_USB_EPT_DATA_ISO, max_len, maxlen) |
16007 +      IO_FIELD(R_USB_EPT_DATA_ISO, ep, endpoint) |
16008 +      IO_FIELD(R_USB_EPT_DATA_ISO, dev, devnum);
16009 +    etrax_epid_iso_set(epid, epid_data);
16010 +  } else {
16011 +    epid_data = IO_STATE(R_USB_EPT_DATA, valid, yes) |
16012 +      IO_FIELD(R_USB_EPT_DATA, low_speed, slow) |
16013 +      /* FIXME: Change any to the actual port? */
16014 +      IO_STATE(R_USB_EPT_DATA, port, any) |
16015 +      IO_FIELD(R_USB_EPT_DATA, max_len, maxlen) |
16016 +      IO_FIELD(R_USB_EPT_DATA, ep, endpoint) |
16017 +      IO_FIELD(R_USB_EPT_DATA, dev, devnum);
16018 +    etrax_epid_set(epid, epid_data);
16019 +  }
16020 +  
16021 +  epid_state[epid].out_traffic = out_traffic;
16022 +  epid_state[epid].type = usb_pipetype(urb->pipe);
16023 +
16024 +  tc_warn("Setting up ep:0x%x epid:%d (addr:%d endp:%d max_len:%d %s %s %s)\n",
16025 +         (unsigned int)ep, epid, devnum, endpoint, maxlen,
16026 +         str_type(urb->pipe), out_traffic ? "out" : "in",
16027 +         slow ? "low" : "full");
16028 +
16029 +  /* Enable Isoc eof interrupt if we set up the first Isoc epid */
16030 +  if(usb_pipeisoc(urb->pipe)) {
16031 +    isoc_epid_counter++;
16032 +    if(isoc_epid_counter == 1) {
16033 +      isoc_warn("Enabled Isoc eof interrupt\n");
16034 +      *R_USB_IRQ_MASK_SET |= IO_STATE(R_USB_IRQ_MASK_SET, iso_eof, set);
16035 +    }
16036 +  }
16037 +
16038 +  DBFEXIT;
16039 +  return epid;
16040 +}
16041 +
16042 +static void tc_free_epid(struct usb_host_endpoint *ep) {
16043 +  unsigned long flags;
16044 +  struct crisv10_ep_priv *ep_priv = ep->hcpriv;
16045 +  int epid;
16046 +  volatile int timeout = 10000;
16047 +
16048 +  DBFENTER;
16049 +
16050 +  if (ep_priv == NULL) {
16051 +    tc_warn("Trying to free unused epid on ep:0x%x\n", (unsigned int)ep);
16052 +    DBFEXIT;
16053 +    return;
16054 +  }
16055 +
16056 +  epid = ep_priv->epid;
16057 +
16058 +  /* Disable Isoc eof interrupt if we free the last Isoc epid */
16059 +  if(epid_isoc(epid)) {
16060 +    ASSERT(isoc_epid_counter > 0);
16061 +    isoc_epid_counter--;
16062 +    if(isoc_epid_counter == 0) {
16063 +      *R_USB_IRQ_MASK_SET &= ~IO_STATE(R_USB_IRQ_MASK_SET, iso_eof, set);
16064 +      isoc_warn("Disabled Isoc eof interrupt\n");
16065 +    }
16066 +  }
16067 +
16068 +  /* Take lock manually instead of in epid_x_x wrappers,
16069 +     because we need to be polling here */
16070 +  spin_lock_irqsave(&etrax_epid_lock, flags);
16071 +  
16072 +  *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
16073 +  nop();
16074 +  while((*R_USB_EPT_DATA & IO_MASK(R_USB_EPT_DATA, hold)) &&
16075 +       (timeout-- > 0));
16076 +  if(timeout == 0) {
16077 +    warn("Timeout while waiting for epid:%d to drop hold\n", epid);
16078 +  }
16079 +  /* This will, among other things, set the valid field to 0. */
16080 +  *R_USB_EPT_DATA = 0;
16081 +  spin_unlock_irqrestore(&etrax_epid_lock, flags);
16082 +  
16083 +  /* Free resource in software state info list */
16084 +  epid_state[epid].inuse = 0;
16085 +
16086 +  /* Free private endpoint data */
16087 +  ep_priv_free(ep);
16088 +  
16089 +  DBFEXIT;
16090 +}
16091 +
16092 +static int tc_allocate_epid(void) {
16093 +  int i;
16094 +  DBFENTER;
16095 +  for (i = 0; i < NBR_OF_EPIDS; i++) {
16096 +    if (!epid_inuse(i)) {
16097 +      DBFEXIT;
16098 +      return i;
16099 +    }
16100 +  }
16101 +  
16102 +  tc_warn("Found no free epids\n");
16103 +  DBFEXIT;
16104 +  return -1;
16105 +}
16106 +
16107 +
16108 +/* Wrappers around the list functions (include/linux/list.h). */
16109 +/* ---------------------------------------------------------- */
16110 +static inline int __urb_list_empty(int epid) {
16111 +  int retval;
16112 +  retval = list_empty(&urb_list[epid]);
16113 +  return retval;
16114 +}
16115 +
16116 +/* Returns first urb for this epid, or NULL if list is empty. */
16117 +static inline struct urb *urb_list_first(int epid) {
16118 +  unsigned long flags;
16119 +  struct urb *first_urb = 0;
16120 +  spin_lock_irqsave(&urb_list_lock, flags);
16121 +  if (!__urb_list_empty(epid)) {
16122 +    /* Get the first urb (i.e. head->next). */
16123 +    urb_entry_t *urb_entry = list_entry((&urb_list[epid])->next, urb_entry_t, list);
16124 +    first_urb = urb_entry->urb;
16125 +  }
16126 +  spin_unlock_irqrestore(&urb_list_lock, flags);
16127 +  return first_urb;
16128 +}
16129 +
16130 +/* Adds an urb_entry last in the list for this epid. */
16131 +static inline void urb_list_add(struct urb *urb, int epid, int mem_flags) {
16132 +  unsigned long flags;
16133 +  urb_entry_t *urb_entry = (urb_entry_t *)kmalloc(sizeof(urb_entry_t), mem_flags);
16134 +  ASSERT(urb_entry);
16135 +  
16136 +  urb_entry->urb = urb;
16137 +  spin_lock_irqsave(&urb_list_lock, flags);
16138 +  list_add_tail(&urb_entry->list, &urb_list[epid]);
16139 +  spin_unlock_irqrestore(&urb_list_lock, flags);
16140 +}
16141 +
16142 +/* Search through the list for an element that contains this urb. (The list
16143 +   is expected to be short and the one we are about to delete will often be
16144 +   the first in the list.)
16145 +   Should be protected by spin_locks in calling function */
16146 +static inline urb_entry_t *__urb_list_entry(struct urb *urb, int epid) {
16147 +  struct list_head *entry;
16148 +  struct list_head *tmp;
16149 +  urb_entry_t *urb_entry;
16150 +  
16151 +  list_for_each_safe(entry, tmp, &urb_list[epid]) {
16152 +    urb_entry = list_entry(entry, urb_entry_t, list);
16153 +    ASSERT(urb_entry);
16154 +    ASSERT(urb_entry->urb);
16155 +    
16156 +    if (urb_entry->urb == urb) {
16157 +      return urb_entry;
16158 +    }
16159 +  }
16160 +  return 0;
16161 +}
16162 +
16163 +/* Same function as above but for global use. Protects list by spinlock */
16164 +static inline urb_entry_t *urb_list_entry(struct urb *urb, int epid) {
16165 +  unsigned long flags;
16166 +  urb_entry_t *urb_entry;
16167 +  spin_lock_irqsave(&urb_list_lock, flags);
16168 +  urb_entry = __urb_list_entry(urb, epid);
16169 +  spin_unlock_irqrestore(&urb_list_lock, flags);
16170 +  return (urb_entry);
16171 +}
16172 +
16173 +/* Delete an urb from the list. */
16174 +static inline void urb_list_del(struct urb *urb, int epid) {
16175 +  unsigned long flags;
16176 +  urb_entry_t *urb_entry;
16177 +
16178 +  /* Delete entry and free. */
16179 +  spin_lock_irqsave(&urb_list_lock, flags);
16180 +  urb_entry = __urb_list_entry(urb, epid);
16181 +  ASSERT(urb_entry);
16182 +
16183 +  list_del(&urb_entry->list);
16184 +  spin_unlock_irqrestore(&urb_list_lock, flags);
16185 +  kfree(urb_entry);
16186 +}
16187 +
16188 +/* Move an urb to the end of the list. */
16189 +static inline void urb_list_move_last(struct urb *urb, int epid) {
16190 +  unsigned long flags;
16191 +  urb_entry_t *urb_entry;
16192 +  
16193 +  spin_lock_irqsave(&urb_list_lock, flags);
16194 +  urb_entry = __urb_list_entry(urb, epid);
16195 +  ASSERT(urb_entry);
16196 +
16197 +  list_del(&urb_entry->list);
16198 +  list_add_tail(&urb_entry->list, &urb_list[epid]);
16199 +  spin_unlock_irqrestore(&urb_list_lock, flags);
16200 +}
16201 +
16202 +/* Get the next urb in the list. */
16203 +static inline struct urb *urb_list_next(struct urb *urb, int epid) {
16204 +  unsigned long flags;
16205 +  urb_entry_t *urb_entry;
16206 +
16207 +  spin_lock_irqsave(&urb_list_lock, flags);
16208 +  urb_entry = __urb_list_entry(urb, epid);
16209 +  ASSERT(urb_entry);
16210 +
16211 +  if (urb_entry->list.next != &urb_list[epid]) {
16212 +    struct list_head *elem = urb_entry->list.next;
16213 +    urb_entry = list_entry(elem, urb_entry_t, list);
16214 +    spin_unlock_irqrestore(&urb_list_lock, flags);
16215 +    return urb_entry->urb;
16216 +  } else {
16217 +    spin_unlock_irqrestore(&urb_list_lock, flags);
16218 +    return NULL;
16219 +  }
16220 +}
16221 +
16222 +struct USB_EP_Desc* create_ep(int epid, struct USB_SB_Desc* sb_desc,
16223 +                             int mem_flags) {
16224 +  struct USB_EP_Desc *ep_desc;
16225 +  ep_desc = (struct USB_EP_Desc *) kmem_cache_alloc(usb_desc_cache, mem_flags);
16226 +  if(ep_desc == NULL)
16227 +    return NULL;
16228 +  memset(ep_desc, 0, sizeof(struct USB_EP_Desc));
16229 +
16230 +  ep_desc->hw_len = 0;
16231 +  ep_desc->command = (IO_FIELD(USB_EP_command, epid, epid) |
16232 +                     IO_STATE(USB_EP_command, enable, yes));
16233 +  if(sb_desc == NULL) {
16234 +    ep_desc->sub = 0;
16235 +  } else {
16236 +    ep_desc->sub = virt_to_phys(sb_desc);
16237 +  }
16238 +  return ep_desc;
16239 +}
16240 +
16241 +#define TT_ZOUT  0
16242 +#define TT_IN    1
16243 +#define TT_OUT   2
16244 +#define TT_SETUP 3
16245 +
16246 +#define CMD_EOL  IO_STATE(USB_SB_command, eol, yes)
16247 +#define CMD_INTR IO_STATE(USB_SB_command, intr, yes)
16248 +#define CMD_FULL IO_STATE(USB_SB_command, full, yes)
16249 +
16250 +/* Allocation and setup of a generic SB. Used to create SETUP, OUT and ZOUT
16251 +   SBs. Also used by create_sb_in() to avoid same allocation procedure at two
16252 +   places */
16253 +struct USB_SB_Desc* create_sb(struct USB_SB_Desc* sb_prev, int tt, void* data,
16254 +                             int datalen, int mem_flags) {
16255 +  struct USB_SB_Desc *sb_desc;
16256 +  sb_desc = (struct USB_SB_Desc*)kmem_cache_alloc(usb_desc_cache, mem_flags);
16257 +  if(sb_desc == NULL)
16258 +    return NULL;
16259 +  memset(sb_desc, 0, sizeof(struct USB_SB_Desc));
16260 +
16261 +  sb_desc->command = IO_FIELD(USB_SB_command, tt, tt) |
16262 +                     IO_STATE(USB_SB_command, eot, yes);
16263 +
16264 +  sb_desc->sw_len = datalen;
16265 +  if(data != NULL) {
16266 +    sb_desc->buf = virt_to_phys(data);
16267 +  } else {
16268 +    sb_desc->buf = 0;
16269 +  }
16270 +  if(sb_prev != NULL) {
16271 +    sb_prev->next = virt_to_phys(sb_desc);
16272 +  }
16273 +  return sb_desc;
16274 +}
16275 +
16276 +/* Creates a copy of an existing SB by allocating space for it and copying
16277 +   settings */
16278 +struct USB_SB_Desc* create_sb_copy(struct USB_SB_Desc* sb_orig, int mem_flags) {
16279 +  struct USB_SB_Desc *sb_desc;
16280 +  sb_desc = (struct USB_SB_Desc*)kmem_cache_alloc(usb_desc_cache, mem_flags);
16281 +  if(sb_desc == NULL)
16282 +    return NULL;
16283 +
16284 +  memcpy(sb_desc, sb_orig, sizeof(struct USB_SB_Desc));
16285 +  return sb_desc;
16286 +}
16287 +
16288 +/* A specific create_sb function for creation of in SBs. This is due to
16289 +   that datalen in In SBs shows how many packets we are expecting. It also
16290 +   sets up the rem field to show how many bytes we expect in last packet
16291 +   if it's not a full one */
16292 +struct USB_SB_Desc* create_sb_in(struct USB_SB_Desc* sb_prev, int datalen,
16293 +                                int maxlen, int mem_flags) {
16294 +  struct USB_SB_Desc *sb_desc;
16295 +  sb_desc = create_sb(sb_prev, TT_IN, NULL,
16296 +                     datalen ? (datalen - 1) / maxlen + 1 : 0, mem_flags);
16297 +  if(sb_desc == NULL)
16298 +    return NULL;
16299 +  sb_desc->command |= IO_FIELD(USB_SB_command, rem, datalen % maxlen);
16300 +  return sb_desc;
16301 +}
16302 +
16303 +void set_sb_cmds(struct USB_SB_Desc *sb_desc, __u16 flags) {
16304 +  sb_desc->command |= flags;
16305 +}
16306 +
16307 +int create_sb_for_urb(struct urb *urb, int mem_flags) {
16308 +  int is_out = !usb_pipein(urb->pipe);
16309 +  int type = usb_pipetype(urb->pipe);
16310 +  int maxlen = usb_maxpacket(urb->dev, urb->pipe, is_out);
16311 +  int buf_len = urb->transfer_buffer_length;
16312 +  void *buf = buf_len > 0 ? urb->transfer_buffer : NULL;
16313 +  struct USB_SB_Desc *sb_desc = NULL;
16314 +
16315 +  struct crisv10_urb_priv *urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
16316 +  ASSERT(urb_priv != NULL);
16317 +
16318 +  switch(type) {
16319 +  case PIPE_CONTROL:
16320 +    /* Setup stage */
16321 +    sb_desc = create_sb(NULL, TT_SETUP, urb->setup_packet, 8, mem_flags);
16322 +    if(sb_desc == NULL)
16323 +      return -ENOMEM;
16324 +    set_sb_cmds(sb_desc, CMD_FULL);
16325 +
16326 +    /* Attach first SB to URB */
16327 +    urb_priv->first_sb = sb_desc;    
16328 +
16329 +    if (is_out) { /* Out Control URB */
16330 +      /* If this Control OUT transfer has an optional data stage we add
16331 +        an OUT token before the mandatory IN (status) token */
16332 +      if ((buf_len > 0) && buf) {
16333 +       sb_desc = create_sb(sb_desc, TT_OUT, buf, buf_len, mem_flags);
16334 +       if(sb_desc == NULL)
16335 +         return -ENOMEM;
16336 +       set_sb_cmds(sb_desc, CMD_FULL);
16337 +      }
16338 +
16339 +      /* Status stage */
16340 +      /* The data length has to be exactly 1. This is due to a requirement
16341 +         of the USB specification that a host must be prepared to receive
16342 +         data in the status phase */
16343 +      sb_desc = create_sb(sb_desc, TT_IN, NULL, 1, mem_flags);
16344 +      if(sb_desc == NULL)
16345 +       return -ENOMEM;
16346 +    } else { /* In control URB */
16347 +      /* Data stage */
16348 +      sb_desc = create_sb_in(sb_desc, buf_len, maxlen, mem_flags);
16349 +      if(sb_desc == NULL)
16350 +       return -ENOMEM;
16351 +
16352 +      /* Status stage */
16353 +      /* Read comment at zout_buffer declaration for an explanation to this. */
16354 +      sb_desc = create_sb(sb_desc, TT_ZOUT, &zout_buffer[0], 1, mem_flags);
16355 +      if(sb_desc == NULL)
16356 +       return -ENOMEM;
16357 +      /* Set descriptor interrupt flag for in URBs so we can finish URB after
16358 +         zout-packet has been sent */
16359 +      set_sb_cmds(sb_desc, CMD_INTR | CMD_FULL);
16360 +    }
16361 +    /* Set end-of-list flag in last SB */
16362 +    set_sb_cmds(sb_desc, CMD_EOL);
16363 +    /* Attach last SB to URB */
16364 +    urb_priv->last_sb = sb_desc;
16365 +    break;
16366 +
16367 +  case PIPE_BULK:
16368 +    if (is_out) { /* Out Bulk URB */
16369 +      sb_desc = create_sb(NULL, TT_OUT, buf, buf_len, mem_flags);
16370 +      if(sb_desc == NULL)
16371 +       return -ENOMEM;
16372 +      /* The full field is set to yes, even if we don't actually check that
16373 +        this is a full-length transfer (i.e., that transfer_buffer_length %
16374 +        maxlen = 0).
16375 +        Setting full prevents the USB controller from sending an empty packet
16376 +        in that case.  However, if URB_ZERO_PACKET was set we want that. */
16377 +      if (!(urb->transfer_flags & URB_ZERO_PACKET)) {
16378 +       set_sb_cmds(sb_desc, CMD_FULL);
16379 +      }
16380 +    } else { /* In Bulk URB */
16381 +      sb_desc = create_sb_in(NULL, buf_len, maxlen, mem_flags);
16382 +      if(sb_desc == NULL)
16383 +       return -ENOMEM;
16384 +    }
16385 +    /* Set end-of-list flag for last SB */
16386 +    set_sb_cmds(sb_desc, CMD_EOL);
16387 +
16388 +    /* Attach SB to URB */
16389 +    urb_priv->first_sb = sb_desc;
16390 +    urb_priv->last_sb = sb_desc;
16391 +    break;
16392 +
16393 +  case PIPE_INTERRUPT:
16394 +    if(is_out) { /* Out Intr URB */
16395 +      sb_desc = create_sb(NULL, TT_OUT, buf, buf_len, mem_flags);
16396 +      if(sb_desc == NULL)
16397 +       return -ENOMEM;
16398 +
16399 +      /* The full field is set to yes, even if we don't actually check that
16400 +        this is a full-length transfer (i.e., that transfer_buffer_length %
16401 +        maxlen = 0).
16402 +        Setting full prevents the USB controller from sending an empty packet
16403 +        in that case.  However, if URB_ZERO_PACKET was set we want that. */
16404 +      if (!(urb->transfer_flags & URB_ZERO_PACKET)) {
16405 +       set_sb_cmds(sb_desc, CMD_FULL);
16406 +      }
16407 +      /* Only generate TX interrupt if it's a Out URB*/
16408 +      set_sb_cmds(sb_desc, CMD_INTR);
16409 +
16410 +    } else { /* In Intr URB */
16411 +      sb_desc = create_sb_in(NULL, buf_len, maxlen, mem_flags);
16412 +      if(sb_desc == NULL)
16413 +       return -ENOMEM;
16414 +    }
16415 +    /* Set end-of-list flag for last SB */
16416 +    set_sb_cmds(sb_desc, CMD_EOL);
16417 +
16418 +    /* Attach SB to URB */
16419 +    urb_priv->first_sb = sb_desc;
16420 +    urb_priv->last_sb = sb_desc;
16421 +
16422 +    break;
16423 +  case PIPE_ISOCHRONOUS:
16424 +    if(is_out) { /* Out Isoc URB */
16425 +      int i;
16426 +      if(urb->number_of_packets == 0) {
16427 +       tc_err("Can't create SBs for Isoc URB with zero packets\n");
16428 +       return -EPIPE;
16429 +      }
16430 +      /* Create one SB descriptor for each packet and link them together. */
16431 +      for(i = 0; i < urb->number_of_packets; i++) {
16432 +       if (urb->iso_frame_desc[i].length > 0) {
16433 +
16434 +         sb_desc = create_sb(sb_desc, TT_OUT, urb->transfer_buffer +
16435 +                             urb->iso_frame_desc[i].offset,
16436 +                             urb->iso_frame_desc[i].length, mem_flags);
16437 +         if(sb_desc == NULL)
16438 +           return -ENOMEM;
16439 +
16440 +         /* Check if it's a full length packet */
16441 +         if (urb->iso_frame_desc[i].length ==
16442 +             usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe))) {
16443 +           set_sb_cmds(sb_desc, CMD_FULL);
16444 +         }
16445 +         
16446 +       } else { /* zero length packet */
16447 +         sb_desc = create_sb(sb_desc, TT_ZOUT, &zout_buffer[0], 1, mem_flags);
16448 +         if(sb_desc == NULL)
16449 +           return -ENOMEM;
16450 +         set_sb_cmds(sb_desc, CMD_FULL);
16451 +       }
16452 +       /* Attach first SB descriptor to URB */
16453 +       if (i == 0) {
16454 +         urb_priv->first_sb = sb_desc;
16455 +       }
16456 +      }
16457 +      /* Set interrupt and end-of-list flags in last SB */
16458 +      set_sb_cmds(sb_desc, CMD_INTR | CMD_EOL);
16459 +      /* Attach last SB descriptor to URB */
16460 +      urb_priv->last_sb = sb_desc;
16461 +      tc_dbg("Created %d out SBs for Isoc URB:0x%x\n",
16462 +              urb->number_of_packets, (unsigned int)urb);
16463 +    } else { /* In Isoc URB */
16464 +      /* Actual number of packets is not relevant for periodic in traffic as
16465 +        long as it is more than zero.  Set to 1 always. */
16466 +      sb_desc = create_sb(sb_desc, TT_IN, NULL, 1, mem_flags);
16467 +      if(sb_desc == NULL)
16468 +       return -ENOMEM;
16469 +      /* Set end-of-list flags for SB */
16470 +      set_sb_cmds(sb_desc, CMD_EOL);
16471 +
16472 +      /* Attach SB to URB */
16473 +      urb_priv->first_sb = sb_desc;
16474 +      urb_priv->last_sb = sb_desc;
16475 +    }
16476 +    break;
16477 +  default:
16478 +    tc_err("Unknown pipe-type\n");
16479 +    return -EPIPE;
16480 +    break;
16481 +  }
16482 +  return 0;
16483 +}
16484 +
16485 +int init_intr_urb(struct urb *urb, int mem_flags) {
16486 +  struct crisv10_urb_priv *urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
16487 +  struct USB_EP_Desc* ep_desc;
16488 +  int interval;
16489 +  int i;
16490 +  int ep_count;
16491 +
16492 +  ASSERT(urb_priv != NULL);
16493 +  ASSERT(usb_pipeint(urb->pipe));
16494 +  /* We can't support interval longer than amount of eof descriptors in
16495 +     TxIntrEPList */
16496 +  if(urb->interval > MAX_INTR_INTERVAL) {
16497 +    tc_err("Interrupt interval %dms too big (max: %dms)\n", urb->interval,
16498 +          MAX_INTR_INTERVAL);
16499 +    return -EINVAL;
16500 +  }
16501 +
16502 +  /* We assume that the SB descriptors already have been setup */
16503 +  ASSERT(urb_priv->first_sb != NULL);
16504 +
16505 +  /* Round off the interval to 2^n, it is obvious that this code favours
16506 +     smaller numbers, but that is actually a good thing */
16507 +  /* FIXME: The "rounding error" for larger intervals will be quite
16508 +     large. For in traffic this shouldn't be a problem since it will only
16509 +     mean that we "poll" more often. */
16510 +  interval = urb->interval;
16511 +  for (i = 0; interval; i++) {
16512 +    interval = interval >> 1;
16513 +  }
16514 +  urb_priv->interval = 1 << (i - 1);
16515 +
16516 +  /* We can only have max interval for Out Interrupt due to that we can only
16517 +     handle one linked in EP for a certain epid in the Intr descr array at the
16518 +     time. The USB Controller in the Etrax 100LX continues to process Intr EPs
16519 +     so we have no way of knowing which one that caused the actual transfer if
16520 +     we have several linked in. */
16521 +  if(usb_pipeout(urb->pipe)) {
16522 +    urb_priv->interval = MAX_INTR_INTERVAL;
16523 +  }
16524 +
16525 +  /* Calculate amount of EPs needed */
16526 +  ep_count = MAX_INTR_INTERVAL / urb_priv->interval;
16527 +
16528 +  for(i = 0; i < ep_count; i++) {
16529 +    ep_desc = create_ep(urb_priv->epid, urb_priv->first_sb, mem_flags);
16530 +    if(ep_desc == NULL) {
16531 +      /* Free any descriptors that we may have allocated before failure */
16532 +      while(i > 0) {
16533 +       i--;
16534 +       kfree(urb_priv->intr_ep_pool[i]);
16535 +      }
16536 +      return -ENOMEM;
16537 +    }
16538 +    urb_priv->intr_ep_pool[i] = ep_desc;
16539 +  }
16540 +  urb_priv->intr_ep_pool_length = ep_count;
16541 +  return 0;
16542 +}
16543 +
16544 +/* DMA RX/TX functions */
16545 +/* ----------------------- */
16546 +
16547 +static void tc_dma_init_rx_list(void) {
16548 +  int i;
16549 +
16550 +  /* Setup descriptor list except last one */
16551 +  for (i = 0; i < (NBR_OF_RX_DESC - 1); i++) {
16552 +    RxDescList[i].sw_len = RX_DESC_BUF_SIZE;
16553 +    RxDescList[i].command = 0;
16554 +    RxDescList[i].next = virt_to_phys(&RxDescList[i + 1]);
16555 +    RxDescList[i].buf = virt_to_phys(RxBuf + (i * RX_DESC_BUF_SIZE));
16556 +    RxDescList[i].hw_len = 0;
16557 +    RxDescList[i].status = 0;
16558 +    
16559 +    /* DMA IN cache bug. (struct etrax_dma_descr has the same layout as
16560 +       USB_IN_Desc for the relevant fields.) */
16561 +    prepare_rx_descriptor((struct etrax_dma_descr*)&RxDescList[i]);
16562 +    
16563 +  }
16564 +  /* Special handling of last descriptor */
16565 +  RxDescList[i].sw_len = RX_DESC_BUF_SIZE;
16566 +  RxDescList[i].command = IO_STATE(USB_IN_command, eol, yes);
16567 +  RxDescList[i].next = virt_to_phys(&RxDescList[0]);
16568 +  RxDescList[i].buf = virt_to_phys(RxBuf + (i * RX_DESC_BUF_SIZE));
16569 +  RxDescList[i].hw_len = 0;
16570 +  RxDescList[i].status = 0;
16571 +  
16572 +  /* Setup list pointers that show progress in list */
16573 +  myNextRxDesc = &RxDescList[0];
16574 +  myLastRxDesc = &RxDescList[NBR_OF_RX_DESC - 1];
16575 +  
16576 +  flush_etrax_cache();
16577 +  /* Point DMA to first descriptor in list and start it */
16578 +  *R_DMA_CH9_FIRST = virt_to_phys(myNextRxDesc);
16579 +  *R_DMA_CH9_CMD = IO_STATE(R_DMA_CH9_CMD, cmd, start);
16580 +}
16581 +
16582 +
16583 +static void tc_dma_init_tx_bulk_list(void) {
16584 +  int i;
16585 +  volatile struct USB_EP_Desc *epDescr;
16586 +
16587 +  for (i = 0; i < (NBR_OF_EPIDS - 1); i++) {
16588 +    epDescr = &(TxBulkEPList[i]);
16589 +    CHECK_ALIGN(epDescr);
16590 +    epDescr->hw_len = 0;
16591 +    epDescr->command = IO_FIELD(USB_EP_command, epid, i);
16592 +    epDescr->sub = 0;
16593 +    epDescr->next = virt_to_phys(&TxBulkEPList[i + 1]);
16594 +
16595 +    /* Initiate two EPs, disabled and with the eol flag set. No need for any
16596 +       preserved epid. */
16597 +    
16598 +    /* The first one has the intr flag set so we get an interrupt when the DMA
16599 +       channel is about to become disabled. */
16600 +    CHECK_ALIGN(&TxBulkDummyEPList[i][0]);
16601 +    TxBulkDummyEPList[i][0].hw_len = 0;
16602 +    TxBulkDummyEPList[i][0].command = (IO_FIELD(USB_EP_command, epid, DUMMY_EPID) |
16603 +                                      IO_STATE(USB_EP_command, eol, yes) |
16604 +                                      IO_STATE(USB_EP_command, intr, yes));
16605 +    TxBulkDummyEPList[i][0].sub = 0;
16606 +    TxBulkDummyEPList[i][0].next = virt_to_phys(&TxBulkDummyEPList[i][1]);
16607 +    
16608 +    /* The second one. */
16609 +    CHECK_ALIGN(&TxBulkDummyEPList[i][1]);
16610 +    TxBulkDummyEPList[i][1].hw_len = 0;
16611 +    TxBulkDummyEPList[i][1].command = (IO_FIELD(USB_EP_command, epid, DUMMY_EPID) |
16612 +                                      IO_STATE(USB_EP_command, eol, yes));
16613 +    TxBulkDummyEPList[i][1].sub = 0;
16614 +    /* The last dummy's next pointer is the same as the current EP's next pointer. */
16615 +    TxBulkDummyEPList[i][1].next = virt_to_phys(&TxBulkEPList[i + 1]);
16616 +  }
16617 +
16618 +  /* Special handling of last descr in list, make list circular */
16619 +  epDescr = &TxBulkEPList[i];
16620 +  CHECK_ALIGN(epDescr);
16621 +  epDescr->hw_len = 0;
16622 +  epDescr->command = IO_STATE(USB_EP_command, eol, yes) |
16623 +    IO_FIELD(USB_EP_command, epid, i);
16624 +  epDescr->sub = 0;
16625 +  epDescr->next = virt_to_phys(&TxBulkEPList[0]);
16626 +  
16627 +  /* Init DMA sub-channel pointers to last item in each list */
16628 +  *R_DMA_CH8_SUB0_EP = virt_to_phys(&TxBulkEPList[i]);
16629 +  /* No point in starting the bulk channel yet.
16630 +   *R_DMA_CH8_SUB0_CMD = IO_STATE(R_DMA_CH8_SUB0_CMD, cmd, start); */
16631 +}
16632 +
16633 +static void tc_dma_init_tx_ctrl_list(void) {
16634 +  int i;
16635 +  volatile struct USB_EP_Desc *epDescr;
16636 +
16637 +  for (i = 0; i < (NBR_OF_EPIDS - 1); i++) {
16638 +    epDescr = &(TxCtrlEPList[i]);
16639 +    CHECK_ALIGN(epDescr);
16640 +    epDescr->hw_len = 0;
16641 +    epDescr->command = IO_FIELD(USB_EP_command, epid, i);
16642 +    epDescr->sub = 0;
16643 +    epDescr->next = virt_to_phys(&TxCtrlEPList[i + 1]);
16644 +  }
16645 +  /* Special handling of last descr in list, make list circular */
16646 +  epDescr = &TxCtrlEPList[i];
16647 +  CHECK_ALIGN(epDescr);
16648 +  epDescr->hw_len = 0;
16649 +  epDescr->command = IO_STATE(USB_EP_command, eol, yes) |
16650 +    IO_FIELD(USB_EP_command, epid, i);
16651 +  epDescr->sub = 0;
16652 +  epDescr->next = virt_to_phys(&TxCtrlEPList[0]);
16653 +  
16654 +  /* Init DMA sub-channel pointers to last item in each list */
16655 +  *R_DMA_CH8_SUB1_EP = virt_to_phys(&TxCtrlEPList[i]);
16656 +  /* No point in starting the ctrl channel yet.
16657 +   *R_DMA_CH8_SUB1_CMD = IO_STATE(R_DMA_CH8_SUB0_CMD, cmd, start); */
16658 +}
16659 +
16660 +
16661 +static void tc_dma_init_tx_intr_list(void) {
16662 +  int i;
16663 +
16664 +  TxIntrSB_zout.sw_len = 1;
16665 +  TxIntrSB_zout.next = 0;
16666 +  TxIntrSB_zout.buf = virt_to_phys(&zout_buffer[0]);
16667 +  TxIntrSB_zout.command = (IO_FIELD(USB_SB_command, rem, 0) |
16668 +                          IO_STATE(USB_SB_command, tt, zout) |
16669 +                          IO_STATE(USB_SB_command, full, yes) |
16670 +                          IO_STATE(USB_SB_command, eot, yes) |
16671 +                          IO_STATE(USB_SB_command, eol, yes));
16672 +  
16673 +  for (i = 0; i < (MAX_INTR_INTERVAL - 1); i++) {
16674 +    CHECK_ALIGN(&TxIntrEPList[i]);
16675 +    TxIntrEPList[i].hw_len = 0;
16676 +    TxIntrEPList[i].command =
16677 +      (IO_STATE(USB_EP_command, eof, yes) |
16678 +       IO_STATE(USB_EP_command, enable, yes) |
16679 +       IO_FIELD(USB_EP_command, epid, INVALID_EPID));
16680 +    TxIntrEPList[i].sub = virt_to_phys(&TxIntrSB_zout);
16681 +    TxIntrEPList[i].next = virt_to_phys(&TxIntrEPList[i + 1]);
16682 +  }
16683 +
16684 +  /* Special handling of last descr in list, make list circular */
16685 +  CHECK_ALIGN(&TxIntrEPList[i]);
16686 +  TxIntrEPList[i].hw_len = 0;
16687 +  TxIntrEPList[i].command =
16688 +    (IO_STATE(USB_EP_command, eof, yes) |
16689 +     IO_STATE(USB_EP_command, eol, yes) |
16690 +     IO_STATE(USB_EP_command, enable, yes) |
16691 +     IO_FIELD(USB_EP_command, epid, INVALID_EPID));
16692 +  TxIntrEPList[i].sub = virt_to_phys(&TxIntrSB_zout);
16693 +  TxIntrEPList[i].next = virt_to_phys(&TxIntrEPList[0]);
16694 +
16695 +  intr_dbg("Initiated Intr EP descriptor list\n");
16696 +
16697 +
16698 +  /* Connect DMA 8 sub-channel 2 to first in list */
16699 +  *R_DMA_CH8_SUB2_EP = virt_to_phys(&TxIntrEPList[0]);
16700 +}
16701 +
16702 +static void tc_dma_init_tx_isoc_list(void) {
16703 +  int i;
16704 +
16705 +  DBFENTER;
16706 +
16707 +  /* Read comment at zout_buffer declaration for an explanation to this. */
16708 +  TxIsocSB_zout.sw_len = 1;
16709 +  TxIsocSB_zout.next = 0;
16710 +  TxIsocSB_zout.buf = virt_to_phys(&zout_buffer[0]);
16711 +  TxIsocSB_zout.command = (IO_FIELD(USB_SB_command, rem, 0) |
16712 +                          IO_STATE(USB_SB_command, tt, zout) |
16713 +                          IO_STATE(USB_SB_command, full, yes) |
16714 +                          IO_STATE(USB_SB_command, eot, yes) |
16715 +                          IO_STATE(USB_SB_command, eol, yes));
16716 +
16717 +  /* The last isochronous EP descriptor is a dummy. */
16718 +  for (i = 0; i < (NBR_OF_EPIDS - 1); i++) {
16719 +    CHECK_ALIGN(&TxIsocEPList[i]);
16720 +    TxIsocEPList[i].hw_len = 0;
16721 +    TxIsocEPList[i].command = IO_FIELD(USB_EP_command, epid, i);
16722 +    TxIsocEPList[i].sub = 0;
16723 +    TxIsocEPList[i].next = virt_to_phys(&TxIsocEPList[i + 1]);
16724 +  }
16725 +
16726 +  CHECK_ALIGN(&TxIsocEPList[i]);
16727 +  TxIsocEPList[i].hw_len = 0;
16728 +
16729 +  /* Must enable the last EP descr to get eof interrupt. */
16730 +  TxIsocEPList[i].command = (IO_STATE(USB_EP_command, enable, yes) |
16731 +                            IO_STATE(USB_EP_command, eof, yes) |
16732 +                            IO_STATE(USB_EP_command, eol, yes) |
16733 +                            IO_FIELD(USB_EP_command, epid, INVALID_EPID));
16734 +  TxIsocEPList[i].sub = virt_to_phys(&TxIsocSB_zout);
16735 +  TxIsocEPList[i].next = virt_to_phys(&TxIsocEPList[0]);
16736 +
16737 +  *R_DMA_CH8_SUB3_EP = virt_to_phys(&TxIsocEPList[0]);
16738 +  *R_DMA_CH8_SUB3_CMD = IO_STATE(R_DMA_CH8_SUB3_CMD, cmd, start);
16739 +}
16740 +
16741 +static int tc_dma_init(struct usb_hcd *hcd) {
16742 +  tc_dma_init_rx_list();
16743 +  tc_dma_init_tx_bulk_list();
16744 +  tc_dma_init_tx_ctrl_list();
16745 +  tc_dma_init_tx_intr_list();
16746 +  tc_dma_init_tx_isoc_list();
16747 +
16748 +  if (cris_request_dma(USB_TX_DMA_NBR,
16749 +                      "ETRAX 100LX built-in USB (Tx)",
16750 +                      DMA_VERBOSE_ON_ERROR,
16751 +                      dma_usb)) {
16752 +    err("Could not allocate DMA ch 8 for USB");
16753 +    return -EBUSY;
16754 +  }
16755 +       
16756 +  if (cris_request_dma(USB_RX_DMA_NBR,
16757 +                      "ETRAX 100LX built-in USB (Rx)",
16758 +                      DMA_VERBOSE_ON_ERROR,
16759 +                      dma_usb)) {
16760 +    err("Could not allocate DMA ch 9 for USB");
16761 +    return -EBUSY;
16762 +  }
16763 +
16764 +  *R_IRQ_MASK2_SET =
16765 +    /* Note that these interrupts are not used. */
16766 +    IO_STATE(R_IRQ_MASK2_SET, dma8_sub0_descr, set) |
16767 +    /* Sub channel 1 (ctrl) descr. interrupts are used. */
16768 +    IO_STATE(R_IRQ_MASK2_SET, dma8_sub1_descr, set) |
16769 +    IO_STATE(R_IRQ_MASK2_SET, dma8_sub2_descr, set) |
16770 +    /* Sub channel 3 (isoc) descr. interrupts are used. */
16771 +    IO_STATE(R_IRQ_MASK2_SET, dma8_sub3_descr, set);
16772 +  
16773 +  /* Note that the dma9_descr interrupt is not used. */
16774 +  *R_IRQ_MASK2_SET =
16775 +    IO_STATE(R_IRQ_MASK2_SET, dma9_eop, set) |
16776 +    IO_STATE(R_IRQ_MASK2_SET, dma9_descr, set);
16777 +
16778 +  if (request_irq(ETRAX_USB_RX_IRQ, tc_dma_rx_interrupt, 0,
16779 +                 "ETRAX 100LX built-in USB (Rx)", hcd)) {
16780 +    err("Could not allocate IRQ %d for USB", ETRAX_USB_RX_IRQ);
16781 +    return -EBUSY;
16782 +  }
16783 +  
16784 +  if (request_irq(ETRAX_USB_TX_IRQ, tc_dma_tx_interrupt, 0,
16785 +                 "ETRAX 100LX built-in USB (Tx)", hcd)) {
16786 +    err("Could not allocate IRQ %d for USB", ETRAX_USB_TX_IRQ);
16787 +    return -EBUSY;
16788 +  }
16789 +
16790 +  return 0;
16791 +}
16792 +
16793 +static void tc_dma_destroy(void) {
16794 +  free_irq(ETRAX_USB_RX_IRQ, NULL);
16795 +  free_irq(ETRAX_USB_TX_IRQ, NULL);
16796 +
16797 +  cris_free_dma(USB_TX_DMA_NBR, "ETRAX 100LX built-in USB (Tx)");
16798 +  cris_free_dma(USB_RX_DMA_NBR, "ETRAX 100LX built-in USB (Rx)");
16799 +
16800 +}
16801 +
16802 +static void tc_dma_link_intr_urb(struct urb *urb);
16803 +
16804 +/* Handle processing of Bulk, Ctrl and Intr queues */
16805 +static void tc_dma_process_queue(int epid) {
16806 +  struct urb *urb;
16807 +  struct crisv10_urb_priv *urb_priv = urb->hcpriv;
16808 +  unsigned long flags;
16809 +  char toggle;
16810 +
16811 +  if(epid_state[epid].disabled) {
16812 +    /* Don't process any URBs on a disabled endpoint */
16813 +    return;
16814 +  }
16815 +
16816 +  /* Do not disturb us while fiddling with EPs and epids */
16817 +  local_irq_save(flags);
16818 +
16819 +  /* For bulk, Ctrl and Intr can we only have one URB active at a time for
16820 +     a specific EP. */
16821 +  if(activeUrbList[epid] != NULL) {
16822 +    /* An URB is already active on EP, skip checking queue */
16823 +    local_irq_restore(flags);
16824 +    return;
16825 +  }
16826 +
16827 +  urb = urb_list_first(epid);
16828 +  if(urb == NULL) {
16829 +    /* No URB waiting in EP queue. Nothing do to */
16830 +    local_irq_restore(flags);
16831 +    return;
16832 +  }
16833 +
16834 +  urb_priv = urb->hcpriv;
16835 +  ASSERT(urb_priv != NULL);
16836 +  ASSERT(urb_priv->urb_state == NOT_STARTED);
16837 +  ASSERT(!usb_pipeisoc(urb->pipe));
16838 +
16839 +  /* Remove this URB from the queue and move it to active */
16840 +  activeUrbList[epid] = urb;
16841 +  urb_list_del(urb, epid);
16842 +
16843 +  urb_priv->urb_state = STARTED;
16844 +
16845 +  /* Reset error counters (regardless of which direction this traffic is). */
16846 +  etrax_epid_clear_error(epid);
16847 +
16848 +  /* Special handling of Intr EP lists */
16849 +  if(usb_pipeint(urb->pipe)) {
16850 +    tc_dma_link_intr_urb(urb);
16851 +    local_irq_restore(flags);
16852 +    return;
16853 +  }
16854 +
16855 +  /* Software must preset the toggle bits for Bulk and Ctrl */
16856 +  if(usb_pipecontrol(urb->pipe)) {
16857 +    /* Toggle bits are initialized only during setup transaction in a
16858 +       CTRL transfer */
16859 +    etrax_epid_set_toggle(epid, 0, 0);
16860 +    etrax_epid_set_toggle(epid, 1, 0);
16861 +  } else {
16862 +    toggle = usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
16863 +                          usb_pipeout(urb->pipe));
16864 +    etrax_epid_set_toggle(epid, usb_pipeout(urb->pipe), toggle);
16865 +  }
16866 +
16867 +  tc_dbg("Added SBs from (URB:0x%x %s %s) to epid %d: %s\n",
16868 +        (unsigned int)urb, str_dir(urb->pipe), str_type(urb->pipe), epid,
16869 +        sblist_to_str(urb_priv->first_sb));
16870 +
16871 +  /* We start the DMA sub channel without checking if it's running or not,
16872 +     because:
16873 +     1) If it's already running, issuing the start command is a nop.
16874 +     2) We avoid a test-and-set race condition. */
16875 +  switch(usb_pipetype(urb->pipe)) {
16876 +  case PIPE_BULK:
16877 +    /* Assert that the EP descriptor is disabled. */
16878 +    ASSERT(!(TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)));
16879 +
16880 +    /* Set up and enable the EP descriptor. */
16881 +    TxBulkEPList[epid].sub = virt_to_phys(urb_priv->first_sb);
16882 +    TxBulkEPList[epid].hw_len = 0;
16883 +    TxBulkEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes);
16884 +
16885 +    /* Check if the dummy list is already with us (if several urbs were queued). */
16886 +    if (usb_pipein(urb->pipe) && (TxBulkEPList[epid].next != virt_to_phys(&TxBulkDummyEPList[epid][0]))) {
16887 +      tc_dbg("Inviting dummy list to the party for urb 0x%lx, epid %d", 
16888 +            (unsigned long)urb, epid);
16889 +      
16890 +      /* We don't need to check if the DMA is at this EP or not before changing the
16891 +        next pointer, since we will do it in one 32-bit write (EP descriptors are
16892 +        32-bit aligned). */
16893 +      TxBulkEPList[epid].next = virt_to_phys(&TxBulkDummyEPList[epid][0]);
16894 +    }
16895 +
16896 +    restart_dma8_sub0();
16897 +
16898 +    /* Update/restart the bulk start timer since we just started the channel.*/
16899 +    mod_timer(&bulk_start_timer, jiffies + BULK_START_TIMER_INTERVAL);
16900 +    /* Update/restart the bulk eot timer since we just inserted traffic. */
16901 +    mod_timer(&bulk_eot_timer, jiffies + BULK_EOT_TIMER_INTERVAL);
16902 +    break;
16903 +  case PIPE_CONTROL:
16904 +    /* Assert that the EP descriptor is disabled. */
16905 +    ASSERT(!(TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)));
16906 +
16907 +    /* Set up and enable the EP descriptor. */
16908 +    TxCtrlEPList[epid].sub = virt_to_phys(urb_priv->first_sb);
16909 +    TxCtrlEPList[epid].hw_len = 0;
16910 +    TxCtrlEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes);
16911 +
16912 +    *R_DMA_CH8_SUB1_CMD = IO_STATE(R_DMA_CH8_SUB1_CMD, cmd, start);
16913 +    break;
16914 +  }
16915 +  local_irq_restore(flags);
16916 +}
16917 +
16918 +static void tc_dma_link_intr_urb(struct urb *urb) {
16919 +  struct crisv10_urb_priv *urb_priv = urb->hcpriv;
16920 +  volatile struct USB_EP_Desc *tmp_ep;
16921 +  struct USB_EP_Desc *ep_desc;
16922 +  int i = 0, epid;
16923 +  int pool_idx = 0;
16924 +
16925 +  ASSERT(urb_priv != NULL);
16926 +  epid = urb_priv->epid;
16927 +  ASSERT(urb_priv->interval > 0);
16928 +  ASSERT(urb_priv->intr_ep_pool_length > 0);
16929 +
16930 +  tmp_ep = &TxIntrEPList[0];
16931 +
16932 +  /* Only insert one EP descriptor in list for Out Intr URBs.
16933 +     We can only handle Out Intr with interval of 128ms because
16934 +     it's not possible to insert several Out Intr EPs because they
16935 +     are not consumed by the DMA. */
16936 +  if(usb_pipeout(urb->pipe)) {
16937 +    ep_desc = urb_priv->intr_ep_pool[0];
16938 +    ASSERT(ep_desc);
16939 +    ep_desc->next = tmp_ep->next;
16940 +    tmp_ep->next = virt_to_phys(ep_desc);
16941 +    i++;
16942 +  } else {
16943 +    /* Loop through Intr EP descriptor list and insert EP for URB at
16944 +       specified interval */
16945 +    do {
16946 +      /* Each EP descriptor with eof flag sat signals a new frame */
16947 +      if (tmp_ep->command & IO_MASK(USB_EP_command, eof)) {
16948 +       /* Insert a EP from URBs EP pool at correct interval */
16949 +       if ((i % urb_priv->interval) == 0) {
16950 +         ep_desc = urb_priv->intr_ep_pool[pool_idx];
16951 +         ASSERT(ep_desc);
16952 +         ep_desc->next = tmp_ep->next;
16953 +         tmp_ep->next = virt_to_phys(ep_desc);
16954 +         pool_idx++;
16955 +         ASSERT(pool_idx <= urb_priv->intr_ep_pool_length);
16956 +       }
16957 +       i++;
16958 +      }
16959 +      tmp_ep = (struct USB_EP_Desc *)phys_to_virt(tmp_ep->next);
16960 +    } while(tmp_ep != &TxIntrEPList[0]);
16961 +  }
16962 +
16963 +  intr_dbg("Added SBs to intr epid %d: %s interval:%d (%d EP)\n", epid,
16964 +          sblist_to_str(urb_priv->first_sb), urb_priv->interval, pool_idx);
16965 +
16966 +  /* We start the DMA sub channel without checking if it's running or not,
16967 +     because:
16968 +     1) If it's already running, issuing the start command is a nop.
16969 +     2) We avoid a test-and-set race condition. */
16970 +  *R_DMA_CH8_SUB2_CMD = IO_STATE(R_DMA_CH8_SUB2_CMD, cmd, start);
16971 +}
16972 +
16973 +static void tc_dma_process_isoc_urb(struct urb *urb) {
16974 +  unsigned long flags;
16975 +  struct crisv10_urb_priv *urb_priv = urb->hcpriv;
16976 +  int epid;
16977 +
16978 +  /* Do not disturb us while fiddling with EPs and epids */
16979 +  local_irq_save(flags);
16980 +
16981 +  ASSERT(urb_priv);
16982 +  ASSERT(urb_priv->first_sb);
16983 +  epid = urb_priv->epid;
16984 +
16985 +  if(activeUrbList[epid] == NULL) {
16986 +    /* EP is idle, so make this URB active */
16987 +    activeUrbList[epid] = urb;
16988 +    urb_list_del(urb, epid);
16989 +    ASSERT(TxIsocEPList[epid].sub == 0);
16990 +    ASSERT(!(TxIsocEPList[epid].command &
16991 +            IO_STATE(USB_EP_command, enable, yes)));
16992 +
16993 +    /* Differentiate between In and Out Isoc. Because In SBs are not consumed*/
16994 +    if(usb_pipein(urb->pipe)) {
16995 +    /* Each EP for In Isoc will have only one SB descriptor, setup when
16996 +       submitting the first active urb. We do it here by copying from URBs
16997 +       pre-allocated SB. */
16998 +      memcpy((void *)&(TxIsocSBList[epid]), urb_priv->first_sb,
16999 +            sizeof(TxIsocSBList[epid]));
17000 +      TxIsocEPList[epid].hw_len = 0;
17001 +      TxIsocEPList[epid].sub = virt_to_phys(&(TxIsocSBList[epid]));
17002 +    } else {
17003 +      /* For Out Isoc we attach the pre-allocated list of SBs for the URB */
17004 +      TxIsocEPList[epid].hw_len = 0;
17005 +      TxIsocEPList[epid].sub = virt_to_phys(urb_priv->first_sb);
17006 +
17007 +      isoc_dbg("Attached first URB:0x%x[%d] to epid:%d first_sb:0x%x"
17008 +              " last_sb::0x%x\n",
17009 +              (unsigned int)urb, urb_priv->urb_num, epid,
17010 +              (unsigned int)(urb_priv->first_sb),
17011 +              (unsigned int)(urb_priv->last_sb));
17012 +    }
17013 +
17014 +    if (urb->transfer_flags & URB_ISO_ASAP) {
17015 +      /* The isoc transfer should be started as soon as possible. The
17016 +        start_frame field is a return value if URB_ISO_ASAP was set. Comparing
17017 +        R_USB_FM_NUMBER with a USB Chief trace shows that the first isoc IN
17018 +        token is sent 2 frames later. I'm not sure how this affects usage of
17019 +        the start_frame field by the device driver, or how it affects things
17020 +        when USB_ISO_ASAP is not set, so therefore there's no compensation for
17021 +        the 2 frame "lag" here. */
17022 +      urb->start_frame = (*R_USB_FM_NUMBER & 0x7ff);
17023 +      TxIsocEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes);
17024 +      urb_priv->urb_state = STARTED;
17025 +      isoc_dbg("URB_ISO_ASAP set, urb->start_frame set to %d\n",
17026 +              urb->start_frame);
17027 +    } else {
17028 +      /* Not started yet. */
17029 +      urb_priv->urb_state = NOT_STARTED;
17030 +      isoc_warn("urb_priv->urb_state set to NOT_STARTED for URB:0x%x\n",
17031 +               (unsigned int)urb);
17032 +    }
17033 +
17034 +  } else {
17035 +    /* An URB is already active on the EP. Leave URB in queue and let
17036 +       finish_isoc_urb process it after current active URB */
17037 +    ASSERT(TxIsocEPList[epid].sub != 0);
17038 +
17039 +    if(usb_pipein(urb->pipe)) {
17040 +      /* Because there already is a active In URB on this epid we do nothing
17041 +         and the finish_isoc_urb() function will handle switching to next URB*/
17042 +
17043 +    } else { /* For Out Isoc, insert new URBs traffic last in SB-list. */
17044 +      struct USB_SB_Desc *temp_sb_desc;
17045 +
17046 +      /* Set state STARTED to all Out Isoc URBs added to SB list because we
17047 +         don't know how many of them that are finished before descr interrupt*/
17048 +      urb_priv->urb_state = STARTED;
17049 +
17050 +      /* Find end of current SB list by looking for SB with eol flag sat */
17051 +      temp_sb_desc = phys_to_virt(TxIsocEPList[epid].sub);
17052 +      while ((temp_sb_desc->command & IO_MASK(USB_SB_command, eol)) !=
17053 +            IO_STATE(USB_SB_command, eol, yes)) {
17054 +       ASSERT(temp_sb_desc->next);
17055 +       temp_sb_desc = phys_to_virt(temp_sb_desc->next);
17056 +      }
17057 +
17058 +      isoc_dbg("Appended URB:0x%x[%d] (first:0x%x last:0x%x) to epid:%d"
17059 +              " sub:0x%x eol:0x%x\n",
17060 +              (unsigned int)urb, urb_priv->urb_num,
17061 +              (unsigned int)(urb_priv->first_sb),
17062 +              (unsigned int)(urb_priv->last_sb), epid,
17063 +              (unsigned int)phys_to_virt(TxIsocEPList[epid].sub),
17064 +              (unsigned int)temp_sb_desc);
17065 +
17066 +      /* Next pointer must be set before eol is removed. */
17067 +      temp_sb_desc->next = virt_to_phys(urb_priv->first_sb);
17068 +      /* Clear the previous end of list flag since there is a new in the
17069 +        added SB descriptor list. */
17070 +      temp_sb_desc->command &= ~IO_MASK(USB_SB_command, eol);
17071 +
17072 +      if (!(TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable))) {
17073 +       __u32 epid_data;
17074 +       /* 8.8.5 in Designer's Reference says we should check for and correct
17075 +          any errors in the EP here.  That should not be necessary if
17076 +          epid_attn is handled correctly, so we assume all is ok. */
17077 +       epid_data = etrax_epid_iso_get(epid);
17078 +       if (IO_EXTRACT(R_USB_EPT_DATA, error_code, epid_data) !=
17079 +           IO_STATE_VALUE(R_USB_EPT_DATA, error_code, no_error)) {
17080 +         isoc_err("Disabled Isoc EP with error:%d on epid:%d when appending"
17081 +                  " URB:0x%x[%d]\n",
17082 +                  IO_EXTRACT(R_USB_EPT_DATA, error_code, epid_data), epid,
17083 +                  (unsigned int)urb, urb_priv->urb_num);
17084 +       }
17085 +
17086 +       /* The SB list was exhausted. */
17087 +       if (virt_to_phys(urb_priv->last_sb) != TxIsocEPList[epid].sub) {
17088 +         /* The new sublist did not get processed before the EP was
17089 +            disabled.  Setup the EP again. */
17090 +
17091 +         if(virt_to_phys(temp_sb_desc) == TxIsocEPList[epid].sub) {
17092 +           isoc_dbg("EP for epid:%d stoped at SB:0x%x before newly inserted"
17093 +                    ", restarting from this URBs SB:0x%x\n",
17094 +                    epid, (unsigned int)temp_sb_desc,
17095 +                    (unsigned int)(urb_priv->first_sb));
17096 +           TxIsocEPList[epid].hw_len = 0;
17097 +           TxIsocEPList[epid].sub = virt_to_phys(urb_priv->first_sb);
17098 +           urb->start_frame = (*R_USB_FM_NUMBER & 0x7ff);
17099 +           /* Enable the EP again so data gets processed this time */
17100 +           TxIsocEPList[epid].command |=
17101 +             IO_STATE(USB_EP_command, enable, yes);
17102 +
17103 +         } else {
17104 +           /* The EP has been disabled but not at end this URB (god knows
17105 +              where). This should generate an epid_attn so we should not be
17106 +              here */
17107 +           isoc_warn("EP was disabled on sb:0x%x before SB list for"
17108 +                    " URB:0x%x[%d] got processed\n",
17109 +                    (unsigned int)phys_to_virt(TxIsocEPList[epid].sub),
17110 +                    (unsigned int)urb, urb_priv->urb_num);
17111 +         }
17112 +       } else {
17113 +         /* This might happend if we are slow on this function and isn't
17114 +            an error. */
17115 +         isoc_dbg("EP was disabled and finished with SBs from appended"
17116 +                  " URB:0x%x[%d]\n", (unsigned int)urb, urb_priv->urb_num);
17117 +       }
17118 +      }
17119 +    }
17120 +  }
17121 +  
17122 +  /* Start the DMA sub channel */
17123 +  *R_DMA_CH8_SUB3_CMD = IO_STATE(R_DMA_CH8_SUB3_CMD, cmd, start);
17124 +
17125 +  local_irq_restore(flags);
17126 +}
17127 +
17128 +static void tc_dma_unlink_intr_urb(struct urb *urb) {
17129 +  struct crisv10_urb_priv *urb_priv = urb->hcpriv;
17130 +  volatile struct USB_EP_Desc *first_ep;  /* First EP in the list. */
17131 +  volatile struct USB_EP_Desc *curr_ep;   /* Current EP, the iterator. */
17132 +  volatile struct USB_EP_Desc *next_ep;   /* The EP after current. */
17133 +  volatile struct USB_EP_Desc *unlink_ep; /* The one we should remove from
17134 +                                            the list. */
17135 +  int count = 0;
17136 +  volatile int timeout = 10000;
17137 +  int epid;
17138 +
17139 +  /* Read 8.8.4 in Designer's Reference, "Removing an EP Descriptor from the
17140 +     List". */
17141 +  ASSERT(urb_priv);
17142 +  ASSERT(urb_priv->intr_ep_pool_length > 0);
17143 +  epid = urb_priv->epid;
17144 +
17145 +  /* First disable all Intr EPs belonging to epid for this URB */
17146 +  first_ep = &TxIntrEPList[0];
17147 +  curr_ep = first_ep;
17148 +  do {
17149 +    next_ep = (struct USB_EP_Desc *)phys_to_virt(curr_ep->next);
17150 +    if (IO_EXTRACT(USB_EP_command, epid, next_ep->command) == epid) {
17151 +      /* Disable EP */
17152 +      next_ep->command &= ~IO_MASK(USB_EP_command, enable);
17153 +    }
17154 +    curr_ep = phys_to_virt(curr_ep->next);
17155 +  } while (curr_ep != first_ep);
17156 +
17157 +
17158 +  /* Now unlink all EPs belonging to this epid from Descr list */
17159 +  first_ep = &TxIntrEPList[0];
17160 +  curr_ep = first_ep;
17161 +  do {
17162 +    next_ep = (struct USB_EP_Desc *)phys_to_virt(curr_ep->next);
17163 +    if (IO_EXTRACT(USB_EP_command, epid, next_ep->command) == epid) {
17164 +      /* This is the one we should unlink. */
17165 +      unlink_ep = next_ep;
17166 +
17167 +      /* Actually unlink the EP from the DMA list. */
17168 +      curr_ep->next = unlink_ep->next;
17169 +
17170 +      /* Wait until the DMA is no longer at this descriptor. */
17171 +      while((*R_DMA_CH8_SUB2_EP == virt_to_phys(unlink_ep)) &&
17172 +           (timeout-- > 0));
17173 +      if(timeout == 0) {
17174 +       warn("Timeout while waiting for DMA-TX-Intr to leave unlink EP\n");
17175 +      }
17176 +      
17177 +      count++;
17178 +    }
17179 +    curr_ep = phys_to_virt(curr_ep->next);
17180 +  } while (curr_ep != first_ep);
17181 +
17182 +  if(count != urb_priv->intr_ep_pool_length) {
17183 +    intr_warn("Unlinked %d of %d Intr EPs for URB:0x%x[%d]\n", count,
17184 +             urb_priv->intr_ep_pool_length, (unsigned int)urb,
17185 +             urb_priv->urb_num);
17186 +  } else {
17187 +    intr_dbg("Unlinked %d of %d interrupt EPs for URB:0x%x\n", count,
17188 +            urb_priv->intr_ep_pool_length, (unsigned int)urb);
17189 +  }
17190 +}
17191 +
17192 +static void check_finished_bulk_tx_epids(struct usb_hcd *hcd,
17193 +                                                   int timer) {
17194 +  unsigned long flags;
17195 +  int epid;
17196 +  struct urb *urb;
17197 +  struct crisv10_urb_priv * urb_priv;
17198 +  __u32 epid_data;
17199 +
17200 +  /* Protect TxEPList */
17201 +  local_irq_save(flags);
17202 +
17203 +  for (epid = 0; epid < NBR_OF_EPIDS; epid++) {
17204 +    /* A finished EP descriptor is disabled and has a valid sub pointer */
17205 +    if (!(TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) &&
17206 +       (TxBulkEPList[epid].sub != 0)) {
17207 +
17208 +      /* Get the active URB for this epid */
17209 +      urb = activeUrbList[epid];
17210 +      /* Sanity checks */
17211 +      ASSERT(urb);
17212 +      urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
17213 +      ASSERT(urb_priv);
17214 +      
17215 +      /* Only handle finished out Bulk EPs here,
17216 +        and let RX interrupt take care of the rest */
17217 +      if(!epid_out_traffic(epid)) {
17218 +       continue;
17219 +      }
17220 +
17221 +      if(timer) {
17222 +       tc_warn("Found finished %s Bulk epid:%d URB:0x%x[%d] from timeout\n",
17223 +               epid_out_traffic(epid) ? "Out" : "In", epid, (unsigned int)urb,
17224 +               urb_priv->urb_num);
17225 +      } else {
17226 +       tc_dbg("Found finished %s Bulk epid:%d URB:0x%x[%d] from interrupt\n",
17227 +              epid_out_traffic(epid) ? "Out" : "In", epid, (unsigned int)urb,
17228 +              urb_priv->urb_num);
17229 +      }
17230 +
17231 +      if(urb_priv->urb_state == UNLINK) {
17232 +       /* This Bulk URB is requested to be unlinked, that means that the EP
17233 +          has been disabled and we might not have sent all data */
17234 +       tc_finish_urb(hcd, urb, urb->status);
17235 +       continue;
17236 +      }
17237 +
17238 +      ASSERT(urb_priv->urb_state == STARTED);
17239 +      if (phys_to_virt(TxBulkEPList[epid].sub) != urb_priv->last_sb) {
17240 +       tc_err("Endpoint got disabled before reaching last sb\n");
17241 +      }
17242 +       
17243 +      epid_data = etrax_epid_get(epid);
17244 +      if (IO_EXTRACT(R_USB_EPT_DATA, error_code, epid_data) ==
17245 +         IO_STATE_VALUE(R_USB_EPT_DATA, error_code, no_error)) {
17246 +       /* This means that the endpoint has no error, is disabled
17247 +          and had inserted traffic, i.e. transfer successfully completed. */
17248 +       tc_finish_urb(hcd, urb, 0);
17249 +      } else {
17250 +       /* Shouldn't happen. We expect errors to be caught by epid
17251 +          attention. */
17252 +       tc_err("Found disabled bulk EP desc (epid:%d error:%d)\n",
17253 +              epid, IO_EXTRACT(R_USB_EPT_DATA, error_code, epid_data));
17254 +      }
17255 +    } else {
17256 +      tc_dbg("Ignoring In Bulk epid:%d, let RX interrupt handle it\n", epid);
17257 +    }
17258 +  }
17259 +
17260 +  local_irq_restore(flags);
17261 +}
17262 +
17263 +static void check_finished_ctrl_tx_epids(struct usb_hcd *hcd) {
17264 +  unsigned long flags;
17265 +  int epid;
17266 +  struct urb *urb;
17267 +  struct crisv10_urb_priv * urb_priv;
17268 +  __u32 epid_data;
17269 +
17270 +  /* Protect TxEPList */
17271 +  local_irq_save(flags);
17272 +
17273 +  for (epid = 0; epid < NBR_OF_EPIDS; epid++) {
17274 +    if(epid == DUMMY_EPID)
17275 +      continue;
17276 +
17277 +    /* A finished EP descriptor is disabled and has a valid sub pointer */
17278 +    if (!(TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)) &&
17279 +       (TxCtrlEPList[epid].sub != 0)) {
17280 +      
17281 +      /* Get the active URB for this epid */
17282 +      urb = activeUrbList[epid];
17283 +
17284 +      if(urb == NULL) {
17285 +       tc_warn("Found finished Ctrl epid:%d with no active URB\n", epid);
17286 +       continue;
17287 +      }
17288 +      
17289 +      /* Sanity checks */
17290 +      ASSERT(usb_pipein(urb->pipe));
17291 +      urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
17292 +      ASSERT(urb_priv);
17293 +      if (phys_to_virt(TxCtrlEPList[epid].sub) != urb_priv->last_sb) {
17294 +       tc_err("Endpoint got disabled before reaching last sb\n");
17295 +      }
17296 +
17297 +      epid_data = etrax_epid_get(epid);
17298 +      if (IO_EXTRACT(R_USB_EPT_DATA, error_code, epid_data) ==
17299 +         IO_STATE_VALUE(R_USB_EPT_DATA, error_code, no_error)) {
17300 +       /* This means that the endpoint has no error, is disabled
17301 +          and had inserted traffic, i.e. transfer successfully completed. */
17302 +
17303 +       /* Check if RX-interrupt for In Ctrl has been processed before
17304 +          finishing the URB */
17305 +       if(urb_priv->ctrl_rx_done) {
17306 +         tc_dbg("Finishing In Ctrl URB:0x%x[%d] in tx_interrupt\n",
17307 +                (unsigned int)urb, urb_priv->urb_num);
17308 +         tc_finish_urb(hcd, urb, 0);
17309 +       } else {
17310 +         /* If we get a zout descriptor interrupt before RX was done for an
17311 +            In Ctrl transfer, then we flag that and it will be finished
17312 +            in the RX-Interrupt */
17313 +         urb_priv->ctrl_zout_done = 1;
17314 +         tc_dbg("Got zout descr interrupt before RX interrupt\n");
17315 +       }
17316 +      } else {
17317 +       /* Shouldn't happen. We expect errors to be caught by epid
17318 +          attention. */
17319 +       tc_err("Found disabled Ctrl EP desc (epid:%d URB:0x%x[%d]) error_code:%d\n", epid, (unsigned int)urb, urb_priv->urb_num, IO_EXTRACT(R_USB_EPT_DATA, error_code, epid_data));
17320 +       __dump_ep_desc(&(TxCtrlEPList[epid]));
17321 +       __dump_ept_data(epid);
17322 +      }      
17323 +    }
17324 +  }
17325 +  local_irq_restore(flags);
17326 +}
17327 +
17328 +/* This function goes through all epids that are setup for Out Isoc transfers
17329 +   and marks (isoc_out_done) all queued URBs that the DMA has finished
17330 +   transfer for.
17331 +   No URB completion is done here to make the interrupt routine return quickly.
17332 +   URBs are completed later with help of complete_isoc_bottom_half() that
17333 +   is scheduled when this function is finished. */
17334 +static void check_finished_isoc_tx_epids(void) {
17335 +  unsigned long flags;
17336 +  int epid;
17337 +  struct urb *urb;
17338 +  struct crisv10_urb_priv * urb_priv;
17339 +  struct USB_SB_Desc* sb_desc;
17340 +  int epid_done;
17341 +
17342 +  /* Protect TxIsocEPList */
17343 +  local_irq_save(flags);
17344 +
17345 +  for (epid = 0; epid < NBR_OF_EPIDS; epid++) {
17346 +    if (TxIsocEPList[epid].sub == 0 || epid == INVALID_EPID ||
17347 +       !epid_out_traffic(epid)) {
17348 +      /* Nothing here to see. */
17349 +      continue;
17350 +    }
17351 +    ASSERT(epid_inuse(epid));
17352 +    ASSERT(epid_isoc(epid));
17353 +
17354 +    sb_desc = phys_to_virt(TxIsocEPList[epid].sub);
17355 +    /* Find the last descriptor of the currently active URB for this ep.
17356 +       This is the first descriptor in the sub list marked for a descriptor
17357 +       interrupt. */
17358 +    while (sb_desc && !IO_EXTRACT(USB_SB_command, intr, sb_desc->command)) {
17359 +      sb_desc = sb_desc->next ? phys_to_virt(sb_desc->next) : 0;
17360 +    }
17361 +    ASSERT(sb_desc);
17362 +
17363 +    isoc_dbg("Descr IRQ checking epid:%d sub:0x%x intr:0x%x\n",
17364 +            epid, (unsigned int)phys_to_virt(TxIsocEPList[epid].sub),
17365 +            (unsigned int)sb_desc);
17366 +
17367 +    urb = activeUrbList[epid];
17368 +    if(urb == NULL) {
17369 +      isoc_err("Isoc Descr irq on epid:%d with no active URB\n", epid);
17370 +      continue;
17371 +    }
17372 +
17373 +    epid_done = 0;
17374 +    while(urb && !epid_done) {
17375 +      /* Sanity check. */
17376 +      ASSERT(usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS);
17377 +      ASSERT(usb_pipeout(urb->pipe));
17378 +      
17379 +      urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
17380 +      ASSERT(urb_priv);
17381 +      ASSERT(urb_priv->urb_state == STARTED ||
17382 +            urb_priv->urb_state == UNLINK);
17383 +      
17384 +      if (sb_desc != urb_priv->last_sb) {
17385 +       /* This urb has been sent. */
17386 +       urb_priv->isoc_out_done = 1;
17387 +
17388 +      } else { /* Found URB that has last_sb as the interrupt reason */
17389 +
17390 +       /* Check if EP has been disabled, meaning that all transfers are done*/
17391 +       if(!(TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable))) {
17392 +         ASSERT((sb_desc->command & IO_MASK(USB_SB_command, eol)) ==
17393 +                IO_STATE(USB_SB_command, eol, yes));
17394 +         ASSERT(sb_desc->next == 0);
17395 +         urb_priv->isoc_out_done = 1;
17396 +       } else {
17397 +         isoc_dbg("Skipping URB:0x%x[%d] because EP not disabled yet\n",
17398 +                  (unsigned int)urb, urb_priv->urb_num);
17399 +       }
17400 +       /* Stop looking any further in queue */
17401 +       epid_done = 1;  
17402 +      }
17403 +
17404 +      if (!epid_done) {
17405 +       if(urb == activeUrbList[epid]) {
17406 +         urb = urb_list_first(epid);
17407 +       } else {
17408 +         urb = urb_list_next(urb, epid);
17409 +       }
17410 +      }
17411 +    } /* END: while(urb && !epid_done) */
17412 +  }
17413 +
17414 +  local_irq_restore(flags);
17415 +}
17416 +
17417 +
17418 +/* This is where the Out Isoc URBs are really completed. This function is
17419 +   scheduled from tc_dma_tx_interrupt() when one or more Out Isoc transfers
17420 +   are done. This function completes all URBs earlier marked with
17421 +   isoc_out_done by fast interrupt routine check_finished_isoc_tx_epids() */
17422 +
17423 +static void complete_isoc_bottom_half(void *data) {
17424 +  struct crisv10_isoc_complete_data *comp_data;
17425 +  struct usb_iso_packet_descriptor *packet;
17426 +  struct crisv10_urb_priv * urb_priv;
17427 +  unsigned long flags;
17428 +  struct urb* urb;
17429 +  int epid_done;
17430 +  int epid;
17431 +  int i;
17432 +
17433 +  comp_data = (struct crisv10_isoc_complete_data*)data;
17434 +
17435 +  local_irq_save(flags);
17436 +
17437 +  for (epid = 0; epid < NBR_OF_EPIDS - 1; epid++) {
17438 +    if(!epid_inuse(epid) || !epid_isoc(epid) || !epid_out_traffic(epid) || epid == DUMMY_EPID) {
17439 +      /* Only check valid Out Isoc epids */
17440 +      continue;
17441 +    }
17442 +
17443 +    isoc_dbg("Isoc bottom-half checking epid:%d, sub:0x%x\n", epid,
17444 +            (unsigned int)phys_to_virt(TxIsocEPList[epid].sub));
17445 +
17446 +    /* The descriptor interrupt handler has marked all transmitted Out Isoc
17447 +       URBs with isoc_out_done.  Now we traverse all epids and for all that
17448 +       have out Isoc traffic we traverse its URB list and complete the
17449 +       transmitted URBs. */
17450 +    epid_done = 0;
17451 +    while (!epid_done) {
17452 +
17453 +      /* Get the active urb (if any) */
17454 +      urb = activeUrbList[epid];
17455 +      if (urb == 0) {
17456 +       isoc_dbg("No active URB on epid:%d anymore\n", epid);
17457 +       epid_done = 1;
17458 +       continue;
17459 +      }
17460 +
17461 +      /* Sanity check. */
17462 +      ASSERT(usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS);
17463 +      ASSERT(usb_pipeout(urb->pipe));
17464 +
17465 +      urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
17466 +      ASSERT(urb_priv);
17467 +
17468 +      if (!(urb_priv->isoc_out_done)) {
17469 +       /* We have reached a URB that isn't flagged done yet, stop traversing. */
17470 +       isoc_dbg("Stoped traversing Out Isoc URBs on epid:%d"
17471 +                " before not yet flaged URB:0x%x[%d]\n",
17472 +                epid, (unsigned int)urb, urb_priv->urb_num);
17473 +       epid_done = 1;
17474 +       continue;
17475 +      }
17476 +
17477 +      /* This urb has been sent. */
17478 +      isoc_dbg("Found URB:0x%x[%d] that is flaged isoc_out_done\n",
17479 +              (unsigned int)urb, urb_priv->urb_num);
17480 +
17481 +      /* Set ok on transferred packets for this URB and finish it */
17482 +      for (i = 0; i < urb->number_of_packets; i++) {
17483 +       packet = &urb->iso_frame_desc[i];
17484 +       packet->status = 0;
17485 +       packet->actual_length = packet->length;
17486 +      }
17487 +      urb_priv->isoc_packet_counter = urb->number_of_packets;
17488 +      tc_finish_urb(comp_data->hcd, urb, 0);
17489 +
17490 +    } /* END: while(!epid_done) */
17491 +  } /* END: for(epid...) */
17492 +
17493 +  local_irq_restore(flags);
17494 +  kmem_cache_free(isoc_compl_cache, comp_data);
17495 +}
17496 +
17497 +
17498 +static void check_finished_intr_tx_epids(struct usb_hcd *hcd) {
17499 +  unsigned long flags;
17500 +  int epid;
17501 +  struct urb *urb;
17502 +  struct crisv10_urb_priv * urb_priv;
17503 +  volatile struct USB_EP_Desc *curr_ep;   /* Current EP, the iterator. */
17504 +  volatile struct USB_EP_Desc *next_ep;   /* The EP after current. */
17505 +
17506 +  /* Protect TxintrEPList */
17507 +  local_irq_save(flags);
17508 +
17509 +  for (epid = 0; epid < NBR_OF_EPIDS; epid++) {
17510 +    if(!epid_inuse(epid) || !epid_intr(epid) || !epid_out_traffic(epid)) {
17511 +      /* Nothing to see on this epid. Only check valid Out Intr epids */
17512 +      continue;
17513 +    }
17514 +
17515 +    urb = activeUrbList[epid];
17516 +    if(urb == 0) {
17517 +      intr_warn("Found Out Intr epid:%d with no active URB\n", epid);
17518 +      continue;
17519 +    }
17520 +
17521 +    /* Sanity check. */
17522 +    ASSERT(usb_pipetype(urb->pipe) == PIPE_INTERRUPT);
17523 +    ASSERT(usb_pipeout(urb->pipe));
17524 +    
17525 +    urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
17526 +    ASSERT(urb_priv);
17527 +
17528 +    /* Go through EPs between first and second sof-EP. It's here Out Intr EPs
17529 +       are inserted.*/
17530 +    curr_ep = &TxIntrEPList[0];
17531 +    do {
17532 +      next_ep = (struct USB_EP_Desc *)phys_to_virt(curr_ep->next);
17533 +      if(next_ep == urb_priv->intr_ep_pool[0]) {
17534 +       /* We found the Out Intr EP for this epid */
17535 +       
17536 +       /* Disable it so it doesn't get processed again */
17537 +       next_ep->command &= ~IO_MASK(USB_EP_command, enable);
17538 +
17539 +       /* Finish the active Out Intr URB with status OK */
17540 +       tc_finish_urb(hcd, urb, 0);
17541 +      }
17542 +      curr_ep = phys_to_virt(curr_ep->next);
17543 +    } while (curr_ep != &TxIntrEPList[1]);
17544 +
17545 +  }
17546 +  local_irq_restore(flags);
17547 +}
17548 +
17549 +/* Interrupt handler for DMA8/IRQ24 with subchannels (called from hardware intr) */
17550 +static irqreturn_t tc_dma_tx_interrupt(int irq, void *vhc) {
17551 +  struct usb_hcd *hcd = (struct usb_hcd*)vhc;
17552 +  ASSERT(hcd);
17553 +
17554 +  if (*R_IRQ_READ2 & IO_MASK(R_IRQ_READ2, dma8_sub0_descr)) {
17555 +    /* Clear this interrupt */
17556 +    *R_DMA_CH8_SUB0_CLR_INTR = IO_STATE(R_DMA_CH8_SUB0_CLR_INTR, clr_descr, do);
17557 +    restart_dma8_sub0();
17558 +  }
17559 +
17560 +  if (*R_IRQ_READ2 & IO_MASK(R_IRQ_READ2, dma8_sub1_descr)) {
17561 +    /* Clear this interrupt */
17562 +    *R_DMA_CH8_SUB1_CLR_INTR = IO_STATE(R_DMA_CH8_SUB1_CLR_INTR, clr_descr, do);
17563 +    check_finished_ctrl_tx_epids(hcd);
17564 +  }
17565 +
17566 +  if (*R_IRQ_READ2 & IO_MASK(R_IRQ_READ2, dma8_sub2_descr)) {
17567 +    /* Clear this interrupt */
17568 +    *R_DMA_CH8_SUB2_CLR_INTR = IO_STATE(R_DMA_CH8_SUB2_CLR_INTR, clr_descr, do);
17569 +    check_finished_intr_tx_epids(hcd);
17570 +  }
17571 +
17572 +  if (*R_IRQ_READ2 & IO_MASK(R_IRQ_READ2, dma8_sub3_descr)) {
17573 +    struct crisv10_isoc_complete_data* comp_data;
17574 +
17575 +    /* Flag done Out Isoc for later completion */
17576 +    check_finished_isoc_tx_epids();
17577 +
17578 +    /* Clear this interrupt */
17579 +    *R_DMA_CH8_SUB3_CLR_INTR = IO_STATE(R_DMA_CH8_SUB3_CLR_INTR, clr_descr, do);
17580 +    /* Schedule bottom half of Out Isoc completion function. This function
17581 +       finishes the URBs marked with isoc_out_done */
17582 +    comp_data = (struct crisv10_isoc_complete_data*)
17583 +      kmem_cache_alloc(isoc_compl_cache, SLAB_ATOMIC);
17584 +    ASSERT(comp_data != NULL);
17585 +    comp_data ->hcd = hcd;
17586 +
17587 +    INIT_WORK(&comp_data->usb_bh, complete_isoc_bottom_half, comp_data);
17588 +    schedule_work(&comp_data->usb_bh);
17589 +  }
17590 +
17591 +  return IRQ_HANDLED;
17592 +}
17593 +
17594 +/* Interrupt handler for DMA9/IRQ25 (called from hardware intr) */
17595 +static irqreturn_t tc_dma_rx_interrupt(int irq, void *vhc) {
17596 +  unsigned long flags;
17597 +  struct urb *urb;
17598 +  struct usb_hcd *hcd = (struct usb_hcd*)vhc;
17599 +  struct crisv10_urb_priv *urb_priv;
17600 +  int epid = 0;
17601 +  int real_error;
17602 +
17603 +  ASSERT(hcd);
17604 +
17605 +  /* Clear this interrupt. */
17606 +  *R_DMA_CH9_CLR_INTR = IO_STATE(R_DMA_CH9_CLR_INTR, clr_eop, do);
17607 +
17608 +  /* Custom clear interrupt for this interrupt */
17609 +  /* The reason we cli here is that we call the driver's callback functions. */
17610 +  local_irq_save(flags);
17611 +
17612 +  /* Note that this while loop assumes that all packets span only
17613 +     one rx descriptor. */
17614 +  while(myNextRxDesc->status & IO_MASK(USB_IN_status, eop)) {
17615 +    epid = IO_EXTRACT(USB_IN_status, epid, myNextRxDesc->status);
17616 +    /* Get the active URB for this epid */
17617 +    urb = activeUrbList[epid];
17618 +
17619 +    ASSERT(epid_inuse(epid));
17620 +    if (!urb) {
17621 +      dma_err("No urb for epid %d in rx interrupt\n", epid);
17622 +      goto skip_out;
17623 +    }
17624 +
17625 +    /* Check if any errors on epid */
17626 +    real_error = 0;
17627 +    if (myNextRxDesc->status & IO_MASK(USB_IN_status, error)) {
17628 +      __u32 r_usb_ept_data;
17629 +
17630 +      if (usb_pipeisoc(urb->pipe)) {
17631 +       r_usb_ept_data = etrax_epid_iso_get(epid);
17632 +       if((r_usb_ept_data & IO_MASK(R_USB_EPT_DATA_ISO, valid)) &&
17633 +          (IO_EXTRACT(R_USB_EPT_DATA_ISO, error_code, r_usb_ept_data) == 0) &&
17634 +          (myNextRxDesc->status & IO_MASK(USB_IN_status, nodata))) {
17635 +         /* Not an error, just a failure to receive an expected iso
17636 +            in packet in this frame.  This is not documented
17637 +            in the designers reference. Continue processing.
17638 +         */
17639 +       } else real_error = 1;
17640 +      } else real_error = 1;
17641 +    }
17642 +
17643 +    if(real_error) {
17644 +      dma_err("Error in RX descr on epid:%d for URB 0x%x",
17645 +             epid, (unsigned int)urb);
17646 +      dump_ept_data(epid);
17647 +      dump_in_desc(myNextRxDesc);
17648 +      goto skip_out;
17649 +    }
17650 +
17651 +    urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
17652 +    ASSERT(urb_priv);
17653 +    ASSERT(urb_priv->urb_state == STARTED ||
17654 +          urb_priv->urb_state == UNLINK);
17655 +
17656 +    if ((usb_pipetype(urb->pipe) == PIPE_BULK) ||
17657 +       (usb_pipetype(urb->pipe) == PIPE_CONTROL) ||
17658 +       (usb_pipetype(urb->pipe) == PIPE_INTERRUPT)) {
17659 +
17660 +      /* We get nodata for empty data transactions, and the rx descriptor's
17661 +        hw_len field is not valid in that case. No data to copy in other
17662 +        words. */
17663 +      if (myNextRxDesc->status & IO_MASK(USB_IN_status, nodata)) {
17664 +       /* No data to copy */
17665 +      } else {
17666 +       /*
17667 +       dma_dbg("Processing RX for URB:0x%x epid:%d (data:%d ofs:%d)\n",
17668 +               (unsigned int)urb, epid, myNextRxDesc->hw_len,
17669 +               urb_priv->rx_offset);
17670 +       */
17671 +       /* Only copy data if URB isn't flagged to be unlinked */
17672 +       if(urb_priv->urb_state != UNLINK) {
17673 +         /* Make sure the data fits in the buffer. */
17674 +         if(urb_priv->rx_offset + myNextRxDesc->hw_len
17675 +            <= urb->transfer_buffer_length) {
17676 +
17677 +           /* Copy the data to URBs buffer */
17678 +           memcpy(urb->transfer_buffer + urb_priv->rx_offset,
17679 +                  phys_to_virt(myNextRxDesc->buf), myNextRxDesc->hw_len);
17680 +           urb_priv->rx_offset += myNextRxDesc->hw_len;
17681 +         } else {
17682 +           /* Signal overflow when returning URB */
17683 +           urb->status = -EOVERFLOW;
17684 +           tc_finish_urb_later(hcd, urb, urb->status);
17685 +         }
17686 +       }
17687 +      }
17688 +
17689 +      /* Check if it was the last packet in the transfer */
17690 +      if (myNextRxDesc->status & IO_MASK(USB_IN_status, eot)) {
17691 +       /* Special handling for In Ctrl URBs. */
17692 +       if(usb_pipecontrol(urb->pipe) && usb_pipein(urb->pipe) &&
17693 +          !(urb_priv->ctrl_zout_done)) {
17694 +         /* Flag that RX part of Ctrl transfer is done. Because the zout descr
17695 +            interrupt hasn't happened yet, the URB will be finished in the
17696 +            TX-Interrupt. */
17697 +         urb_priv->ctrl_rx_done = 1;
17698 +         tc_dbg("Not finishing In Ctrl URB:0x%x from rx_interrupt, waiting"
17699 +                " for zout\n", (unsigned int)urb);
17700 +       } else {
17701 +         tc_finish_urb(hcd, urb, 0);
17702 +       }
17703 +      }
17704 +    } else { /* ISOC RX */
17705 +      /*
17706 +      isoc_dbg("Processing RX for epid:%d (URB:0x%x) ISOC pipe\n",
17707 +              epid, (unsigned int)urb);
17708 +      */
17709 +
17710 +      struct usb_iso_packet_descriptor *packet;
17711 +
17712 +      if (urb_priv->urb_state == UNLINK) {
17713 +       isoc_warn("Ignoring Isoc Rx data for urb being unlinked.\n");
17714 +       goto skip_out;
17715 +      } else if (urb_priv->urb_state == NOT_STARTED) {
17716 +       isoc_err("What? Got Rx data for Isoc urb that isn't started?\n");
17717 +       goto skip_out;
17718 +      }
17719 +
17720 +      packet = &urb->iso_frame_desc[urb_priv->isoc_packet_counter];
17721 +      ASSERT(packet);
17722 +      packet->status = 0;
17723 +
17724 +      if (myNextRxDesc->status & IO_MASK(USB_IN_status, nodata)) {
17725 +       /* We get nodata for empty data transactions, and the rx descriptor's
17726 +          hw_len field is not valid in that case. We copy 0 bytes however to
17727 +          stay in synch. */
17728 +       packet->actual_length = 0;
17729 +      } else {
17730 +       packet->actual_length = myNextRxDesc->hw_len;
17731 +       /* Make sure the data fits in the buffer. */
17732 +       ASSERT(packet->actual_length <= packet->length);
17733 +       memcpy(urb->transfer_buffer + packet->offset,
17734 +              phys_to_virt(myNextRxDesc->buf), packet->actual_length);
17735 +       if(packet->actual_length > 0)
17736 +         isoc_dbg("Copied %d bytes, packet %d for URB:0x%x[%d]\n",
17737 +                  packet->actual_length, urb_priv->isoc_packet_counter,
17738 +                  (unsigned int)urb, urb_priv->urb_num);
17739 +      }
17740 +
17741 +      /* Increment the packet counter. */
17742 +      urb_priv->isoc_packet_counter++;
17743 +
17744 +      /* Note that we don't care about the eot field in the rx descriptor's
17745 +        status. It will always be set for isoc traffic. */
17746 +      if (urb->number_of_packets == urb_priv->isoc_packet_counter) {
17747 +       /* Complete the urb with status OK. */
17748 +       tc_finish_urb(hcd, urb, 0);
17749 +      }
17750 +    }
17751 +
17752 +  skip_out:
17753 +    myNextRxDesc->status = 0;
17754 +    myNextRxDesc->command |= IO_MASK(USB_IN_command, eol);
17755 +    myLastRxDesc->command &= ~IO_MASK(USB_IN_command, eol);
17756 +    myLastRxDesc = myNextRxDesc;
17757 +    myNextRxDesc = phys_to_virt(myNextRxDesc->next);
17758 +    flush_etrax_cache();
17759 +    *R_DMA_CH9_CMD = IO_STATE(R_DMA_CH9_CMD, cmd, restart);
17760 +  }
17761 +
17762 +  local_irq_restore(flags);
17763 +
17764 +  return IRQ_HANDLED;
17765 +}
17766 +
17767 +static void tc_bulk_start_timer_func(unsigned long dummy) {
17768 +  /* We might enable an EP descriptor behind the current DMA position when
17769 +     it's about to decide that there are no more bulk traffic and it should
17770 +     stop the bulk channel.
17771 +     Therefore we periodically check if the bulk channel is stopped and there
17772 +     is an enabled bulk EP descriptor, in which case we start the bulk
17773 +     channel. */
17774 +  
17775 +  if (!(*R_DMA_CH8_SUB0_CMD & IO_MASK(R_DMA_CH8_SUB0_CMD, cmd))) {
17776 +    int epid;
17777 +
17778 +    timer_dbg("bulk_start_timer: Bulk DMA channel not running.\n");
17779 +
17780 +    for (epid = 0; epid < NBR_OF_EPIDS; epid++) {
17781 +      if (TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
17782 +       timer_warn("Found enabled EP for epid %d, starting bulk channel.\n",
17783 +                  epid);
17784 +       restart_dma8_sub0();
17785 +
17786 +       /* Restart the bulk eot timer since we just started the bulk channel.*/
17787 +       mod_timer(&bulk_eot_timer, jiffies + BULK_EOT_TIMER_INTERVAL);
17788 +
17789 +       /* No need to search any further. */
17790 +       break;
17791 +      }
17792 +    }
17793 +  } else {
17794 +    timer_dbg("bulk_start_timer: Bulk DMA channel running.\n");
17795 +  }
17796 +}
17797 +
17798 +static void tc_bulk_eot_timer_func(unsigned long dummy) {
17799 +  struct usb_hcd *hcd = (struct usb_hcd*)dummy;
17800 +  ASSERT(hcd);
17801 +  /* Because of a race condition in the top half, we might miss a bulk eot.
17802 +     This timer "simulates" a bulk eot if we don't get one for a while,
17803 +     hopefully correcting the situation. */
17804 +  timer_dbg("bulk_eot_timer timed out.\n");
17805 +  check_finished_bulk_tx_epids(hcd, 1);
17806 +}
17807 +
17808 +
17809 +/*************************************************************/
17810 +/*************************************************************/
17811 +/* Device driver block                                       */
17812 +/*************************************************************/
17813 +/*************************************************************/
17814 +
17815 +/* Forward declarations for device driver functions */
17816 +static int devdrv_hcd_probe(struct device *);
17817 +static int devdrv_hcd_remove(struct device *);
17818 +#ifdef CONFIG_PM
17819 +static int devdrv_hcd_suspend(struct device *, u32, u32);
17820 +static int devdrv_hcd_resume(struct device *, u32);
17821 +#endif /* CONFIG_PM */
17822 +
17823 +/* the device */
17824 +static struct platform_device *devdrv_hc_platform_device;
17825 +
17826 +/* device driver interface */
17827 +static struct device_driver devdrv_hc_device_driver = {
17828 +  .name =                      (char *) hc_name,
17829 +  .bus =                       &platform_bus_type,
17830 +
17831 +  .probe =             devdrv_hcd_probe,
17832 +  .remove =            devdrv_hcd_remove,
17833 +
17834 +#ifdef CONFIG_PM
17835 +  .suspend =           devdrv_hcd_suspend,
17836 +  .resume =            devdrv_hcd_resume,
17837 +#endif /* CONFIG_PM */
17838 +};
17839 +
17840 +/* initialize the host controller and driver  */
17841 +static int __init_or_module devdrv_hcd_probe(struct device *dev)
17842 +{
17843 +  struct usb_hcd *hcd;
17844 +  struct crisv10_hcd *crisv10_hcd;
17845 +  int retval;
17846 +
17847 +  /* Check DMA burst length */
17848 +  if(IO_EXTRACT(R_BUS_CONFIG, dma_burst, *R_BUS_CONFIG) !=
17849 +     IO_STATE(R_BUS_CONFIG, dma_burst, burst32)) {
17850 +    devdrv_err("Invalid DMA burst length in Etrax 100LX,"
17851 +              " needs to be 32\n");
17852 +    return -EPERM;
17853 +  }
17854 +
17855 +  hcd = usb_create_hcd(&crisv10_hc_driver, dev, dev->bus_id);
17856 +  if (!hcd)
17857 +    return -ENOMEM;
17858 +
17859 +  crisv10_hcd = hcd_to_crisv10_hcd(hcd);
17860 +  spin_lock_init(&crisv10_hcd->lock);
17861 +  crisv10_hcd->num_ports = num_ports();
17862 +  crisv10_hcd->running = 0;
17863 +
17864 +  dev_set_drvdata(dev, crisv10_hcd);
17865 +
17866 +  devdrv_dbg("ETRAX USB IRQs HC:%d  RX:%d  TX:%d\n", ETRAX_USB_HC_IRQ,
17867 +         ETRAX_USB_RX_IRQ, ETRAX_USB_TX_IRQ);
17868 +
17869 +  /* Print out chip version read from registers */
17870 +  int rev_maj = *R_USB_REVISION & IO_MASK(R_USB_REVISION, major);
17871 +  int rev_min = *R_USB_REVISION & IO_MASK(R_USB_REVISION, minor);
17872 +  if(rev_min == 0) {
17873 +    devdrv_info("Etrax 100LX USB Revision %d v1,2\n", rev_maj);
17874 +  } else {
17875 +    devdrv_info("Etrax 100LX USB Revision %d v%d\n", rev_maj, rev_min);
17876 +  }
17877 +
17878 +  devdrv_info("Bulk timer interval, start:%d eot:%d\n",
17879 +             BULK_START_TIMER_INTERVAL,
17880 +             BULK_EOT_TIMER_INTERVAL);
17881 +
17882 +
17883 +  /* Init root hub data structures */
17884 +  if(rh_init()) {
17885 +    devdrv_err("Failed init data for Root Hub\n");
17886 +    retval = -ENOMEM;
17887 +  }
17888 +
17889 +  if(port_in_use(0)) {
17890 +    if (cris_request_io_interface(if_usb_1, "ETRAX100LX USB-HCD")) {
17891 +      printk(KERN_CRIT "usb-host: request IO interface usb1 failed");
17892 +      retval = -EBUSY;
17893 +      goto out;
17894 +    }
17895 +    devdrv_info("Claimed interface for USB physical port 1\n");
17896 +  }
17897 +  if(port_in_use(1)) {
17898 +    if (cris_request_io_interface(if_usb_2, "ETRAX100LX USB-HCD")) {
17899 +      /* Free first interface if second failed to be claimed */
17900 +      if(port_in_use(0)) {
17901 +       cris_free_io_interface(if_usb_1);
17902 +      }
17903 +      printk(KERN_CRIT "usb-host: request IO interface usb2 failed");
17904 +      retval = -EBUSY;
17905 +      goto out;
17906 +    }
17907 +    devdrv_info("Claimed interface for USB physical port 2\n");
17908 +  }
17909 +  
17910 +  /* Init transfer controller structs and locks */
17911 +  if((retval = tc_init(hcd)) != 0) {
17912 +    goto out;
17913 +  }
17914 +
17915 +  /* Attach interrupt functions for DMA and init DMA controller */
17916 +  if((retval = tc_dma_init(hcd)) != 0) {
17917 +    goto out;
17918 +  }
17919 +
17920 +  /* Attach the top IRQ handler for USB controller interrupts */
17921 +  if (request_irq(ETRAX_USB_HC_IRQ, crisv10_hcd_top_irq, 0,
17922 +                 "ETRAX 100LX built-in USB (HC)", hcd)) {
17923 +    err("Could not allocate IRQ %d for USB", ETRAX_USB_HC_IRQ);
17924 +    retval = -EBUSY;
17925 +    goto out;
17926 +  }
17927 +
17928 +  /* iso_eof is only enabled when isoc traffic is running. */
17929 +  *R_USB_IRQ_MASK_SET =
17930 +    /* IO_STATE(R_USB_IRQ_MASK_SET, iso_eof, set) | */
17931 +    IO_STATE(R_USB_IRQ_MASK_SET, bulk_eot, set) |
17932 +    IO_STATE(R_USB_IRQ_MASK_SET, epid_attn, set) |
17933 +    IO_STATE(R_USB_IRQ_MASK_SET, port_status, set) |
17934 +    IO_STATE(R_USB_IRQ_MASK_SET, ctl_status, set);
17935 +
17936 +
17937 +  crisv10_ready_wait();
17938 +  /* Reset the USB interface. */
17939 +  *R_USB_COMMAND =
17940 +    IO_STATE(R_USB_COMMAND, port_sel, nop) |
17941 +    IO_STATE(R_USB_COMMAND, port_cmd, reset) |
17942 +    IO_STATE(R_USB_COMMAND, ctrl_cmd, reset);
17943 +
17944 +  /* Designer's Reference, p. 8 - 10 says we should Initiate R_USB_FM_PSTART to
17945 +     0x2A30 (10800), to guarantee that control traffic gets 10% of the
17946 +     bandwidth, and periodic transfer may allocate the rest (90%).
17947 +     This doesn't work though.
17948 +     The value 11960 is chosen to be just after the SOF token, with a couple
17949 +     of bit times extra for possible bit stuffing. */
17950 +  *R_USB_FM_PSTART = IO_FIELD(R_USB_FM_PSTART, value, 11960);
17951 +
17952 +  crisv10_ready_wait();
17953 +  /* Configure the USB interface as a host controller. */
17954 +  *R_USB_COMMAND =
17955 +    IO_STATE(R_USB_COMMAND, port_sel, nop) |
17956 +    IO_STATE(R_USB_COMMAND, port_cmd, reset) |
17957 +    IO_STATE(R_USB_COMMAND, ctrl_cmd, host_config);
17958 +
17959 +
17960 +  /* Check so controller not busy before enabling ports */
17961 +  crisv10_ready_wait();
17962 +
17963 +  /* Enable selected USB ports */
17964 +  if(port_in_use(0)) {
17965 +    *R_USB_PORT1_DISABLE = IO_STATE(R_USB_PORT1_DISABLE, disable, no);
17966 +  } else {
17967 +    *R_USB_PORT1_DISABLE = IO_STATE(R_USB_PORT1_DISABLE, disable, yes);
17968 +  }
17969 +  if(port_in_use(1)) {
17970 +    *R_USB_PORT2_DISABLE = IO_STATE(R_USB_PORT2_DISABLE, disable, no);
17971 +  } else {
17972 +    *R_USB_PORT2_DISABLE = IO_STATE(R_USB_PORT2_DISABLE, disable, yes);
17973 +  }
17974 +
17975 +  crisv10_ready_wait();
17976 +  /* Start processing of USB traffic. */
17977 +  *R_USB_COMMAND =
17978 +    IO_STATE(R_USB_COMMAND, port_sel, nop) |
17979 +    IO_STATE(R_USB_COMMAND, port_cmd, reset) |
17980 +    IO_STATE(R_USB_COMMAND, ctrl_cmd, host_run);
17981 +
17982 +  /* Do not continue probing initialization before USB interface is done */
17983 +  crisv10_ready_wait();
17984 +
17985 +  /* Register our Host Controller to USB Core
17986 +   * Finish the remaining parts of generic HCD initialization: allocate the
17987 +   * buffers of consistent memory, register the bus
17988 +   * and call the driver's reset() and start() routines. */
17989 +  retval = usb_add_hcd(hcd, ETRAX_USB_HC_IRQ, IRQF_DISABLED);
17990 +  if (retval != 0) {
17991 +    devdrv_err("Failed registering HCD driver\n");
17992 +    goto out;
17993 +  }
17994 +
17995 +  return 0;
17996 +
17997 + out:
17998 +  devdrv_hcd_remove(dev);
17999 +  return retval;
18000 +}
18001 +
18002 +
18003 +/* cleanup after the host controller and driver */
18004 +static int __init_or_module devdrv_hcd_remove(struct device *dev)
18005 +{
18006 +  struct crisv10_hcd *crisv10_hcd = dev_get_drvdata(dev);
18007 +  struct usb_hcd *hcd;
18008 +
18009 +  if (!crisv10_hcd)
18010 +    return 0;
18011 +  hcd = crisv10_hcd_to_hcd(crisv10_hcd);
18012 +
18013 +
18014 +  /* Stop USB Controller in Etrax 100LX */
18015 +  crisv10_hcd_reset(hcd);
18016 +
18017 +  usb_remove_hcd(hcd);
18018 +  devdrv_dbg("Removed HCD from USB Core\n");
18019 +
18020 +  /* Free USB Controller IRQ */
18021 +  free_irq(ETRAX_USB_HC_IRQ, NULL);
18022 +
18023 +  /* Free resources */
18024 +  tc_dma_destroy();
18025 +  tc_destroy();
18026 +
18027 +
18028 +  if(port_in_use(0)) {
18029 +    cris_free_io_interface(if_usb_1);
18030 +  }
18031 +  if(port_in_use(1)) {
18032 +    cris_free_io_interface(if_usb_2);
18033 +  }
18034 +
18035 +  devdrv_dbg("Freed all claimed resources\n");
18036 +
18037 +  return 0;
18038 +}
18039 +
18040 +
18041 +#ifdef CONFIG_PM
18042 +
18043 +static int devdrv_hcd_suspend(struct usb_hcd *hcd, u32 state, u32 level)
18044 +{
18045 +  return 0; /* no-op for now */
18046 +}
18047 +
18048 +static int devdrv_hcd_resume(struct usb_hcd *hcd, u32 level)
18049 +{
18050 +  return 0; /* no-op for now */
18051 +}
18052 +
18053 +#endif /* CONFIG_PM */
18054 +
18055 +
18056 +
18057 +/*************************************************************/
18058 +/*************************************************************/
18059 +/* Module block                                              */
18060 +/*************************************************************/
18061 +/*************************************************************/
18062
18063 +/* register driver */
18064 +static int __init module_hcd_init(void) 
18065 +{
18066 +  
18067 +  if (usb_disabled())
18068 +    return -ENODEV;
18069 +
18070 +  /* Here we select enabled ports by following defines created from
18071 +     menuconfig */
18072 +#ifndef CONFIG_ETRAX_USB_HOST_PORT1
18073 +  ports &= ~(1<<0);
18074 +#endif
18075 +#ifndef CONFIG_ETRAX_USB_HOST_PORT2
18076 +  ports &= ~(1<<1);
18077 +#endif
18078 +
18079 +  printk(KERN_INFO "%s version "VERSION" "COPYRIGHT"\n", product_desc);
18080 +
18081 +  devdrv_hc_platform_device =
18082 +    platform_device_register_simple((char *) hc_name, 0, NULL, 0);
18083 +
18084 +  if (IS_ERR(devdrv_hc_platform_device))
18085 +    return PTR_ERR(devdrv_hc_platform_device);
18086 +  return driver_register(&devdrv_hc_device_driver);
18087 +  /* 
18088 +   * Note that we do not set the DMA mask for the device,
18089 +   * i.e. we pretend that we will use PIO, since no specific
18090 +   * allocation routines are needed for DMA buffers. This will
18091 +   * cause the HCD buffer allocation routines to fall back to
18092 +   * kmalloc().
18093 +   */
18094 +}
18095 +
18096 +/* unregister driver */
18097 +static void __exit module_hcd_exit(void) 
18098 +{      
18099 +  driver_unregister(&devdrv_hc_device_driver);
18100 +}
18101 +
18102 +
18103 +/* Module hooks */
18104 +module_init(module_hcd_init);
18105 +module_exit(module_hcd_exit);
18106 --- linux-2.6.19.2.orig/drivers/usb/host/hc_crisv10.h   2007-01-10 20:10:37.000000000 +0100
18107 +++ linux-2.6.19.2.dev/drivers/usb/host/hc_crisv10.h    1970-01-01 01:00:00.000000000 +0100
18108 @@ -1,289 +0,0 @@
18109 -#ifndef __LINUX_ETRAX_USB_H
18110 -#define __LINUX_ETRAX_USB_H
18111 -
18112 -#include <linux/types.h>
18113 -#include <linux/list.h>
18114 -
18115 -typedef struct USB_IN_Desc {
18116 -       volatile __u16 sw_len;
18117 -       volatile __u16 command;
18118 -       volatile unsigned long next;
18119 -       volatile unsigned long buf;
18120 -       volatile __u16 hw_len;
18121 -       volatile __u16 status;
18122 -} USB_IN_Desc_t;
18123 -
18124 -typedef struct USB_SB_Desc {
18125 -       volatile __u16 sw_len;
18126 -       volatile __u16 command;
18127 -       volatile unsigned long next;
18128 -       volatile unsigned long buf;
18129 -       __u32 dummy;
18130 -} USB_SB_Desc_t;
18131 -
18132 -typedef struct USB_EP_Desc {
18133 -       volatile __u16 hw_len;
18134 -       volatile __u16 command;
18135 -       volatile unsigned long sub;
18136 -       volatile unsigned long next;
18137 -       __u32 dummy;
18138 -} USB_EP_Desc_t;
18139 -
18140 -struct virt_root_hub {
18141 -       int devnum;
18142 -       void *urb;
18143 -       void *int_addr;
18144 -       int send;
18145 -       int interval;
18146 -       int numports;
18147 -       struct timer_list rh_int_timer;
18148 -       volatile __u16 wPortChange_1;
18149 -       volatile __u16 wPortChange_2;
18150 -       volatile __u16 prev_wPortStatus_1;
18151 -       volatile __u16 prev_wPortStatus_2;
18152 -};
18153 -
18154 -struct etrax_usb_intr_traffic {
18155 -       int sleeping;
18156 -       int error;
18157 -       struct wait_queue *wq;
18158 -};
18159 -
18160 -typedef struct etrax_usb_hc {
18161 -       struct usb_bus *bus;
18162 -       struct virt_root_hub rh;
18163 -       struct etrax_usb_intr_traffic intr;
18164 -} etrax_hc_t;
18165 -
18166 -typedef enum {
18167 -       STARTED,
18168 -       NOT_STARTED,
18169 -       UNLINK,
18170 -       TRANSFER_DONE,
18171 -       WAITING_FOR_DESCR_INTR
18172 -} etrax_usb_urb_state_t;
18173 -
18174 -
18175 -
18176 -typedef struct etrax_usb_urb_priv {
18177 -       /* The first_sb field is used for freeing all SB descriptors belonging
18178 -          to an urb. The corresponding ep descriptor's sub pointer cannot be
18179 -          used for this since the DMA advances the sub pointer as it processes
18180 -          the sb list. */
18181 -       USB_SB_Desc_t *first_sb;
18182 -       /* The last_sb field referes to the last SB descriptor that belongs to
18183 -          this urb. This is important to know so we can free the SB descriptors
18184 -          that ranges between first_sb and last_sb. */
18185 -       USB_SB_Desc_t *last_sb;
18186 -
18187 -       /* The rx_offset field is used in ctrl and bulk traffic to keep track
18188 -          of the offset in the urb's transfer_buffer where incoming data should be
18189 -          copied to. */
18190 -       __u32 rx_offset;
18191 -
18192 -       /* Counter used in isochronous transfers to keep track of the
18193 -          number of packets received/transmitted.  */
18194 -       __u32 isoc_packet_counter;
18195 -
18196 -       /* This field is used to pass information about the urb's current state between
18197 -          the various interrupt handlers (thus marked volatile). */
18198 -       volatile etrax_usb_urb_state_t urb_state;
18199 -
18200 -       /* Connection between the submitted urb and ETRAX epid number */
18201 -       __u8 epid;
18202 -
18203 -       /* The rx_data_list field is used for periodic traffic, to hold
18204 -          received data for later processing in the the complete_urb functions,
18205 -          where the data us copied to the urb's transfer_buffer. Basically, we
18206 -          use this intermediate storage because we don't know when it's safe to
18207 -          reuse the transfer_buffer (FIXME?). */
18208 -       struct list_head rx_data_list;
18209 -} etrax_urb_priv_t;
18210 -
18211 -/* This struct is for passing data from the top half to the bottom half. */
18212 -typedef struct usb_interrupt_registers
18213 -{
18214 -       etrax_hc_t *hc;
18215 -       __u32 r_usb_epid_attn;
18216 -       __u8 r_usb_status;
18217 -       __u16 r_usb_rh_port_status_1;
18218 -       __u16 r_usb_rh_port_status_2;
18219 -       __u32 r_usb_irq_mask_read;
18220 -       __u32 r_usb_fm_number;
18221 -       struct work_struct usb_bh;
18222 -} usb_interrupt_registers_t;
18223 -
18224 -/* This struct is for passing data from the isoc top half to the isoc bottom half. */
18225 -typedef struct usb_isoc_complete_data
18226 -{
18227 -       struct urb *urb;
18228 -       struct work_struct usb_bh;
18229 -} usb_isoc_complete_data_t;
18230 -
18231 -/* This struct holds data we get from the rx descriptors for DMA channel 9
18232 -   for periodic traffic (intr and isoc). */
18233 -typedef struct rx_data
18234 -{
18235 -       void *data;
18236 -       int length;
18237 -       struct list_head list;
18238 -} rx_data_t;
18239 -
18240 -typedef struct urb_entry
18241 -{
18242 -       struct urb *urb;
18243 -       struct list_head list;
18244 -} urb_entry_t;
18245 -
18246 -/* ---------------------------------------------------------------------------
18247 -   Virtual Root HUB
18248 -   ------------------------------------------------------------------------- */
18249 -/* destination of request */
18250 -#define RH_INTERFACE               0x01
18251 -#define RH_ENDPOINT                0x02
18252 -#define RH_OTHER                   0x03
18253 -
18254 -#define RH_CLASS                   0x20
18255 -#define RH_VENDOR                  0x40
18256 -
18257 -/* Requests: bRequest << 8 | bmRequestType */
18258 -#define RH_GET_STATUS           0x0080
18259 -#define RH_CLEAR_FEATURE        0x0100
18260 -#define RH_SET_FEATURE          0x0300
18261 -#define RH_SET_ADDRESS         0x0500
18262 -#define RH_GET_DESCRIPTOR      0x0680
18263 -#define RH_SET_DESCRIPTOR       0x0700
18264 -#define RH_GET_CONFIGURATION   0x0880
18265 -#define RH_SET_CONFIGURATION   0x0900
18266 -#define RH_GET_STATE            0x0280
18267 -#define RH_GET_INTERFACE        0x0A80
18268 -#define RH_SET_INTERFACE        0x0B00
18269 -#define RH_SYNC_FRAME           0x0C80
18270 -/* Our Vendor Specific Request */
18271 -#define RH_SET_EP               0x2000
18272 -
18273 -
18274 -/* Hub port features */
18275 -#define RH_PORT_CONNECTION         0x00
18276 -#define RH_PORT_ENABLE             0x01
18277 -#define RH_PORT_SUSPEND            0x02
18278 -#define RH_PORT_OVER_CURRENT       0x03
18279 -#define RH_PORT_RESET              0x04
18280 -#define RH_PORT_POWER              0x08
18281 -#define RH_PORT_LOW_SPEED          0x09
18282 -#define RH_C_PORT_CONNECTION       0x10
18283 -#define RH_C_PORT_ENABLE           0x11
18284 -#define RH_C_PORT_SUSPEND          0x12
18285 -#define RH_C_PORT_OVER_CURRENT     0x13
18286 -#define RH_C_PORT_RESET            0x14
18287 -
18288 -/* Hub features */
18289 -#define RH_C_HUB_LOCAL_POWER       0x00
18290 -#define RH_C_HUB_OVER_CURRENT      0x01
18291 -
18292 -#define RH_DEVICE_REMOTE_WAKEUP    0x00
18293 -#define RH_ENDPOINT_STALL          0x01
18294 -
18295 -/* Our Vendor Specific feature */
18296 -#define RH_REMOVE_EP               0x00
18297 -
18298 -
18299 -#define RH_ACK                     0x01
18300 -#define RH_REQ_ERR                 -1
18301 -#define RH_NACK                    0x00
18302 -
18303 -/* Field definitions for */
18304 -
18305 -#define USB_IN_command__eol__BITNR      0 /* command macros */
18306 -#define USB_IN_command__eol__WIDTH      1
18307 -#define USB_IN_command__eol__no         0
18308 -#define USB_IN_command__eol__yes        1
18309 -
18310 -#define USB_IN_command__intr__BITNR     3
18311 -#define USB_IN_command__intr__WIDTH     1
18312 -#define USB_IN_command__intr__no        0
18313 -#define USB_IN_command__intr__yes       1
18314 -
18315 -#define USB_IN_status__eop__BITNR       1 /* status macros. */
18316 -#define USB_IN_status__eop__WIDTH       1
18317 -#define USB_IN_status__eop__no          0
18318 -#define USB_IN_status__eop__yes         1
18319 -
18320 -#define USB_IN_status__eot__BITNR       5
18321 -#define USB_IN_status__eot__WIDTH       1
18322 -#define USB_IN_status__eot__no          0
18323 -#define USB_IN_status__eot__yes         1
18324 -
18325 -#define USB_IN_status__error__BITNR     6
18326 -#define USB_IN_status__error__WIDTH     1
18327 -#define USB_IN_status__error__no        0
18328 -#define USB_IN_status__error__yes       1
18329 -
18330 -#define USB_IN_status__nodata__BITNR    7
18331 -#define USB_IN_status__nodata__WIDTH    1
18332 -#define USB_IN_status__nodata__no       0
18333 -#define USB_IN_status__nodata__yes      1
18334 -
18335 -#define USB_IN_status__epid__BITNR      8
18336 -#define USB_IN_status__epid__WIDTH      5
18337 -
18338 -#define USB_EP_command__eol__BITNR      0
18339 -#define USB_EP_command__eol__WIDTH      1
18340 -#define USB_EP_command__eol__no         0
18341 -#define USB_EP_command__eol__yes        1
18342 -
18343 -#define USB_EP_command__eof__BITNR      1
18344 -#define USB_EP_command__eof__WIDTH      1
18345 -#define USB_EP_command__eof__no         0
18346 -#define USB_EP_command__eof__yes        1
18347 -
18348 -#define USB_EP_command__intr__BITNR     3
18349 -#define USB_EP_command__intr__WIDTH     1
18350 -#define USB_EP_command__intr__no        0
18351 -#define USB_EP_command__intr__yes       1
18352 -
18353 -#define USB_EP_command__enable__BITNR   4
18354 -#define USB_EP_command__enable__WIDTH   1
18355 -#define USB_EP_command__enable__no      0
18356 -#define USB_EP_command__enable__yes     1
18357 -
18358 -#define USB_EP_command__hw_valid__BITNR 5
18359 -#define USB_EP_command__hw_valid__WIDTH 1
18360 -#define USB_EP_command__hw_valid__no    0
18361 -#define USB_EP_command__hw_valid__yes   1
18362 -
18363 -#define USB_EP_command__epid__BITNR     8
18364 -#define USB_EP_command__epid__WIDTH     5
18365 -
18366 -#define USB_SB_command__eol__BITNR      0 /* command macros. */
18367 -#define USB_SB_command__eol__WIDTH      1
18368 -#define USB_SB_command__eol__no         0
18369 -#define USB_SB_command__eol__yes        1
18370 -
18371 -#define USB_SB_command__eot__BITNR      1
18372 -#define USB_SB_command__eot__WIDTH      1
18373 -#define USB_SB_command__eot__no         0
18374 -#define USB_SB_command__eot__yes        1
18375 -
18376 -#define USB_SB_command__intr__BITNR     3
18377 -#define USB_SB_command__intr__WIDTH     1
18378 -#define USB_SB_command__intr__no        0
18379 -#define USB_SB_command__intr__yes       1
18380 -
18381 -#define USB_SB_command__tt__BITNR       4
18382 -#define USB_SB_command__tt__WIDTH       2
18383 -#define USB_SB_command__tt__zout        0
18384 -#define USB_SB_command__tt__in          1
18385 -#define USB_SB_command__tt__out         2
18386 -#define USB_SB_command__tt__setup       3
18387 -
18388 -
18389 -#define USB_SB_command__rem__BITNR      8
18390 -#define USB_SB_command__rem__WIDTH      6
18391 -
18392 -#define USB_SB_command__full__BITNR     6
18393 -#define USB_SB_command__full__WIDTH     1
18394 -#define USB_SB_command__full__no        0
18395 -#define USB_SB_command__full__yes       1
18396 -
18397 -#endif
18398 --- linux-2.6.19.2.orig/drivers/usb/host/hc-crisv10.h   1970-01-01 01:00:00.000000000 +0100
18399 +++ linux-2.6.19.2.dev/drivers/usb/host/hc-crisv10.h    2006-01-27 13:59:58.000000000 +0100
18400 @@ -0,0 +1,330 @@
18401 +#ifndef __LINUX_ETRAX_USB_H
18402 +#define __LINUX_ETRAX_USB_H
18403 +
18404 +#include <linux/types.h>
18405 +#include <linux/list.h>
18406 +
18407 +struct USB_IN_Desc {
18408 +  volatile __u16 sw_len;
18409 +  volatile __u16 command;
18410 +  volatile unsigned long next;
18411 +  volatile unsigned long buf;
18412 +  volatile __u16 hw_len;
18413 +  volatile __u16 status;
18414 +};
18415 +
18416 +struct USB_SB_Desc {
18417 +  volatile __u16 sw_len;
18418 +  volatile __u16 command;
18419 +  volatile unsigned long next;
18420 +  volatile unsigned long buf;
18421 +};
18422 +
18423 +struct USB_EP_Desc {
18424 +  volatile __u16 hw_len;
18425 +  volatile __u16 command;
18426 +  volatile unsigned long sub;
18427 +  volatile unsigned long next;
18428 +};
18429 +
18430 +
18431 +/* Root Hub port status struct */
18432 +struct crisv10_rh {
18433 +  volatile __u16 wPortChange[2];
18434 +  volatile __u16 wPortStatusPrev[2];
18435 +};
18436 +
18437 +/* HCD description */
18438 +struct crisv10_hcd {
18439 +  spinlock_t           lock;
18440 +  __u8                 num_ports;
18441 +  __u8                  running;
18442 +};
18443 +
18444 +
18445 +/* Endpoint HC private data description */
18446 +struct crisv10_ep_priv {
18447 +  int epid;
18448 +};
18449 +
18450 +/* Additional software state info for a USB Controller epid */
18451 +struct etrax_epid {
18452 +  __u8 inuse;       /* !0 = setup in Etrax and used for an endpoint */
18453 +  __u8 disabled;    /* !0 = Temporarily disabled to avoid resubmission */
18454 +  __u8 type;        /* Setup as: PIPE_BULK, PIPE_CONTROL ... */
18455 +  __u8 out_traffic; /* !0 = This epid is for out traffic */
18456 +};
18457 +
18458 +/* Struct to hold information of scheduled later URB completion */
18459 +struct urb_later_data {
18460 +  struct work_struct ws;
18461 +  struct usb_hcd *hcd;
18462 +  struct urb *urb;
18463 +  int urb_num;
18464 +  int status;
18465 +};
18466 +
18467 +
18468 +typedef enum {
18469 +  STARTED,
18470 +  NOT_STARTED,
18471 +  UNLINK,
18472 +} crisv10_urb_state_t;
18473 +
18474 +
18475 +struct crisv10_urb_priv {
18476 +  /* Sequence number for this URB. Every new submitted URB gets this from
18477 +     an incrementing counter. Used when a URB is scheduled for later finish to
18478 +     be sure that the intended URB hasn't already been completed (device
18479 +     drivers have a tendency to reuse URBs once they are completed, causing us
18480 +     to not be able to single old ones out based on the URB pointer alone.) */
18481 +  __u32 urb_num;
18482 +
18483 +  /* The first_sb field is used for freeing all SB descriptors belonging
18484 +     to an urb. The corresponding ep descriptor's sub pointer cannot be
18485 +     used for this since the DMA advances the sub pointer as it processes
18486 +     the sb list. */
18487 +  struct USB_SB_Desc *first_sb;
18488 +
18489 +  /* The last_sb field refers to the last SB descriptor that belongs to
18490 +     this urb. This is important to know so we can free the SB descriptors
18491 +     that range between first_sb and last_sb. */
18492 +  struct USB_SB_Desc *last_sb;
18493 +  
18494 +  /* The rx_offset field is used in ctrl and bulk traffic to keep track
18495 +     of the offset in the urb's transfer_buffer where incoming data should be
18496 +     copied to. */
18497 +  __u32 rx_offset;
18498 +  
18499 +  /* Counter used in isochronous transfers to keep track of the
18500 +     number of packets received/transmitted.  */
18501 +  __u32 isoc_packet_counter;
18502 +
18503 +  /* Flag that marks if this Isoc Out URB has finished it's transfer. Used
18504 +     because several URBs can be finished before list is processed */
18505 +  __u8  isoc_out_done;
18506 +  
18507 +  /* This field is used to pass information about the urb's current state
18508 +     between the various interrupt handlers (thus marked volatile). */
18509 +  volatile crisv10_urb_state_t urb_state;
18510 +  
18511 +  /* Ctrl transfers consist of (at least) 3 packets: SETUP, IN and ZOUT.
18512 +     When DMA8 sub-channel 2 has processed the SB list for this sequence we
18513 +     get an interrupt. We also get an interrupt for In transfers, and which
18514 +     one of these interrupts comes first depends on data size and device.
18515 +     To be sure that we have got both interrupts before we complete the URB
18516 +     we have these two flags that show which part has completed.
18517 +     We can then check when we get one of the interrupts that if the other has
18518 +     occurred it's safe for us to complete the URB, otherwise we set the
18519 +     appropriate flag and do the completion when we get the other interrupt. */
18520 +  volatile unsigned char ctrl_zout_done;
18521 +  volatile unsigned char ctrl_rx_done;
18522 +
18523 +  /* Connection between the submitted urb and ETRAX epid number */
18524 +  __u8 epid;
18525 +  
18526 +  /* The rx_data_list field is used for periodic traffic, to hold
18527 +     received data for later processing in the complete_urb functions,
18528 +     where the data is copied to the urb's transfer_buffer. Basically, we
18529 +     use this intermediate storage because we don't know when it's safe to
18530 +     reuse the transfer_buffer (FIXME?). */
18531 +  struct list_head rx_data_list;
18532 +
18533 +
18534 +  /* The interval time rounded up to closest 2^N */
18535 +  int interval;
18536 +
18537 +  /* Pool of EP descriptors needed if it's an INTR transfer.
18538 +     Amount of EPs in pool corresponds to how many INTRs should
18539 +     be inserted in TxIntrEPList (max 128, defined by MAX_INTR_INTERVAL) */
18540 +  struct USB_EP_Desc* intr_ep_pool[128];
18541 +
18542 +  /* The amount of EPs allocated for this INTR URB */
18543 +  int intr_ep_pool_length;
18544 +
18545 +  /* Pointer to info struct if URB is scheduled to be finished later */
18546 +  struct urb_later_data* later_data;
18547 +};
18548 +
18549 +
18550 +/* This struct is for passing data from the top half to the bottom half irq
18551 +   handlers */
18552 +struct crisv10_irq_reg {
18553 +  struct usb_hcd* hcd;
18554 +  __u32 r_usb_epid_attn;
18555 +  __u8 r_usb_status;
18556 +  __u16 r_usb_rh_port_status_1;
18557 +  __u16 r_usb_rh_port_status_2;
18558 +  __u32 r_usb_irq_mask_read;
18559 +  __u32 r_usb_fm_number;
18560 +  struct work_struct usb_bh;
18561 +};
18562 +
18563 +
18564 +/* This struct is for passing data from the isoc top half to the isoc bottom
18565 +   half. */
18566 +struct crisv10_isoc_complete_data {
18567 +  struct usb_hcd *hcd;
18568 +  struct urb *urb;
18569 +  struct work_struct usb_bh;
18570 +};
18571 +
18572 +/* Entry item for URB lists for each endpoint */
18573 +typedef struct urb_entry
18574 +{
18575 +       struct urb *urb;
18576 +       struct list_head list;
18577 +} urb_entry_t;
18578 +
18579 +/* ---------------------------------------------------------------------------
18580 +   Virtual Root HUB
18581 +   ------------------------------------------------------------------------- */
18582 +/* destination of request */
18583 +#define RH_INTERFACE               0x01
18584 +#define RH_ENDPOINT                0x02
18585 +#define RH_OTHER                   0x03
18586 +
18587 +#define RH_CLASS                   0x20
18588 +#define RH_VENDOR                  0x40
18589 +
18590 +/* Requests: bRequest << 8 | bmRequestType */
18591 +#define RH_GET_STATUS           0x0080
18592 +#define RH_CLEAR_FEATURE        0x0100
18593 +#define RH_SET_FEATURE          0x0300
18594 +#define RH_SET_ADDRESS         0x0500
18595 +#define RH_GET_DESCRIPTOR      0x0680
18596 +#define RH_SET_DESCRIPTOR       0x0700
18597 +#define RH_GET_CONFIGURATION   0x0880
18598 +#define RH_SET_CONFIGURATION   0x0900
18599 +#define RH_GET_STATE            0x0280
18600 +#define RH_GET_INTERFACE        0x0A80
18601 +#define RH_SET_INTERFACE        0x0B00
18602 +#define RH_SYNC_FRAME           0x0C80
18603 +/* Our Vendor Specific Request */
18604 +#define RH_SET_EP               0x2000
18605 +
18606 +
18607 +/* Hub port features */
18608 +#define RH_PORT_CONNECTION         0x00
18609 +#define RH_PORT_ENABLE             0x01
18610 +#define RH_PORT_SUSPEND            0x02
18611 +#define RH_PORT_OVER_CURRENT       0x03
18612 +#define RH_PORT_RESET              0x04
18613 +#define RH_PORT_POWER              0x08
18614 +#define RH_PORT_LOW_SPEED          0x09
18615 +#define RH_C_PORT_CONNECTION       0x10
18616 +#define RH_C_PORT_ENABLE           0x11
18617 +#define RH_C_PORT_SUSPEND          0x12
18618 +#define RH_C_PORT_OVER_CURRENT     0x13
18619 +#define RH_C_PORT_RESET            0x14
18620 +
18621 +/* Hub features */
18622 +#define RH_C_HUB_LOCAL_POWER       0x00
18623 +#define RH_C_HUB_OVER_CURRENT      0x01
18624 +
18625 +#define RH_DEVICE_REMOTE_WAKEUP    0x00
18626 +#define RH_ENDPOINT_STALL          0x01
18627 +
18628 +/* Our Vendor Specific feature */
18629 +#define RH_REMOVE_EP               0x00
18630 +
18631 +
18632 +#define RH_ACK                     0x01
18633 +#define RH_REQ_ERR                 -1
18634 +#define RH_NACK                    0x00
18635 +
18636 +/* Field definitions for */
18637 +
18638 +#define USB_IN_command__eol__BITNR      0 /* command macros */
18639 +#define USB_IN_command__eol__WIDTH      1
18640 +#define USB_IN_command__eol__no         0
18641 +#define USB_IN_command__eol__yes        1
18642 +
18643 +#define USB_IN_command__intr__BITNR     3
18644 +#define USB_IN_command__intr__WIDTH     1
18645 +#define USB_IN_command__intr__no        0
18646 +#define USB_IN_command__intr__yes       1
18647 +
18648 +#define USB_IN_status__eop__BITNR       1 /* status macros. */
18649 +#define USB_IN_status__eop__WIDTH       1
18650 +#define USB_IN_status__eop__no          0
18651 +#define USB_IN_status__eop__yes         1
18652 +
18653 +#define USB_IN_status__eot__BITNR       5
18654 +#define USB_IN_status__eot__WIDTH       1
18655 +#define USB_IN_status__eot__no          0
18656 +#define USB_IN_status__eot__yes         1
18657 +
18658 +#define USB_IN_status__error__BITNR     6
18659 +#define USB_IN_status__error__WIDTH     1
18660 +#define USB_IN_status__error__no        0
18661 +#define USB_IN_status__error__yes       1
18662 +
18663 +#define USB_IN_status__nodata__BITNR    7
18664 +#define USB_IN_status__nodata__WIDTH    1
18665 +#define USB_IN_status__nodata__no       0
18666 +#define USB_IN_status__nodata__yes      1
18667 +
18668 +#define USB_IN_status__epid__BITNR      8
18669 +#define USB_IN_status__epid__WIDTH      5
18670 +
18671 +#define USB_EP_command__eol__BITNR      0
18672 +#define USB_EP_command__eol__WIDTH      1
18673 +#define USB_EP_command__eol__no         0
18674 +#define USB_EP_command__eol__yes        1
18675 +
18676 +#define USB_EP_command__eof__BITNR      1
18677 +#define USB_EP_command__eof__WIDTH      1
18678 +#define USB_EP_command__eof__no         0
18679 +#define USB_EP_command__eof__yes        1
18680 +
18681 +#define USB_EP_command__intr__BITNR     3
18682 +#define USB_EP_command__intr__WIDTH     1
18683 +#define USB_EP_command__intr__no        0
18684 +#define USB_EP_command__intr__yes       1
18685 +
18686 +#define USB_EP_command__enable__BITNR   4
18687 +#define USB_EP_command__enable__WIDTH   1
18688 +#define USB_EP_command__enable__no      0
18689 +#define USB_EP_command__enable__yes     1
18690 +
18691 +#define USB_EP_command__hw_valid__BITNR 5
18692 +#define USB_EP_command__hw_valid__WIDTH 1
18693 +#define USB_EP_command__hw_valid__no    0
18694 +#define USB_EP_command__hw_valid__yes   1
18695 +
18696 +#define USB_EP_command__epid__BITNR     8
18697 +#define USB_EP_command__epid__WIDTH     5
18698 +
18699 +#define USB_SB_command__eol__BITNR      0 /* command macros. */
18700 +#define USB_SB_command__eol__WIDTH      1
18701 +#define USB_SB_command__eol__no         0
18702 +#define USB_SB_command__eol__yes        1
18703 +
18704 +#define USB_SB_command__eot__BITNR      1
18705 +#define USB_SB_command__eot__WIDTH      1
18706 +#define USB_SB_command__eot__no         0
18707 +#define USB_SB_command__eot__yes        1
18708 +
18709 +#define USB_SB_command__intr__BITNR     3
18710 +#define USB_SB_command__intr__WIDTH     1
18711 +#define USB_SB_command__intr__no        0
18712 +#define USB_SB_command__intr__yes       1
18713 +
18714 +#define USB_SB_command__tt__BITNR       4
18715 +#define USB_SB_command__tt__WIDTH       2
18716 +#define USB_SB_command__tt__zout        0
18717 +#define USB_SB_command__tt__in          1
18718 +#define USB_SB_command__tt__out         2
18719 +#define USB_SB_command__tt__setup       3
18720 +
18721 +
18722 +#define USB_SB_command__rem__BITNR      8
18723 +#define USB_SB_command__rem__WIDTH      6
18724 +
18725 +#define USB_SB_command__full__BITNR     6
18726 +#define USB_SB_command__full__WIDTH     1
18727 +#define USB_SB_command__full__no        0
18728 +#define USB_SB_command__full__yes       1
18729 +
18730 +#endif
18731 diff -urN linux-2.6.19.2.orig/drivers/net/cris/Makefile linux-2.6.19.2.dev/drivers/net/cris/Makefile
18732 --- linux-2.6.19.2.orig/drivers/net/cris/Makefile       2007-01-10 20:10:37.000000000 +0100
18733 +++ linux-2.6.19.2.dev/drivers/net/cris/Makefile        2005-01-04 13:09:12.000000000 +0100
18734 @@ -1 +1,2 @@
18735  obj-$(CONFIG_ETRAX_ARCH_V10) += eth_v10.o
18736 +obj-$(CONFIG_ETRAX_ARCH_V32) += eth_v32.o
18737 diff -urN linux-2.6.19.2.orig/drivers/net/cris/eth_v10.c linux-2.6.19.2.dev/drivers/net/cris/eth_v10.c
18738 --- linux-2.6.19.2.orig/drivers/net/cris/eth_v10.c      2007-01-10 20:10:37.000000000 +0100
18739 +++ linux-2.6.19.2.dev/drivers/net/cris/eth_v10.c       2007-01-15 16:35:48.000000000 +0100
18740 @@ -1,221 +1,10 @@
18741 -/* $Id: ethernet.c,v 1.31 2004/10/18 14:49:03 starvik Exp $
18742 - *
18743 - * e100net.c: A network driver for the ETRAX 100LX network controller.
18744 +/*
18745 + * Driver for the ETRAX 100LX network controller.
18746   *
18747 - * Copyright (c) 1998-2002 Axis Communications AB.
18748 + * Copyright (c) 1998-2006 Axis Communications AB.
18749   *
18750   * The outline of this driver comes from skeleton.c.
18751   *
18752 - * $Log: ethernet.c,v $
18753 - * Revision 1.31  2004/10/18 14:49:03  starvik
18754 - * Use RX interrupt as random source
18755 - *
18756 - * Revision 1.30  2004/09/29 10:44:04  starvik
18757 - * Enabed MAC-address output again
18758 - *
18759 - * Revision 1.29  2004/08/24 07:14:05  starvik
18760 - * Make use of generic MDIO interface and constants.
18761 - *
18762 - * Revision 1.28  2004/08/20 09:37:11  starvik
18763 - * Added support for Intel LXT972A. Creds to Randy Scarborough.
18764 - *
18765 - * Revision 1.27  2004/08/16 12:37:22  starvik
18766 - * Merge of Linux 2.6.8
18767 - *
18768 - * Revision 1.25  2004/06/21 10:29:57  starvik
18769 - * Merge of Linux 2.6.7
18770 - *
18771 - * Revision 1.23  2004/06/09 05:29:22  starvik
18772 - * Avoid any race where R_DMA_CH1_FIRST is NULL (may trigger cache bug).
18773 - *
18774 - * Revision 1.22  2004/05/14 07:58:03  starvik
18775 - * Merge of changes from 2.4
18776 - *
18777 - * Revision 1.20  2004/03/11 11:38:40  starvik
18778 - * Merge of Linux 2.6.4
18779 - *
18780 - * Revision 1.18  2003/12/03 13:45:46  starvik
18781 - * Use hardware pad for short packets to prevent information leakage.
18782 - *
18783 - * Revision 1.17  2003/07/04 08:27:37  starvik
18784 - * Merge of Linux 2.5.74
18785 - *
18786 - * Revision 1.16  2003/04/24 08:28:22  starvik
18787 - * New LED behaviour: LED off when no link
18788 - *
18789 - * Revision 1.15  2003/04/09 05:20:47  starvik
18790 - * Merge of Linux 2.5.67
18791 - *
18792 - * Revision 1.13  2003/03/06 16:11:01  henriken
18793 - * Off by one error in group address register setting.
18794 - *
18795 - * Revision 1.12  2003/02/27 17:24:19  starvik
18796 - * Corrected Rev to Revision
18797 - *
18798 - * Revision 1.11  2003/01/24 09:53:21  starvik
18799 - * Oops. Initialize GA to 0, not to 1
18800 - *
18801 - * Revision 1.10  2003/01/24 09:50:55  starvik
18802 - * Initialize GA_0 and GA_1 to 0 to avoid matching of unwanted packets
18803 - *
18804 - * Revision 1.9  2002/12/13 07:40:58  starvik
18805 - * Added basic ethtool interface
18806 - * Handled out of memory when allocating new buffers
18807 - *
18808 - * Revision 1.8  2002/12/11 13:13:57  starvik
18809 - * Added arch/ to v10 specific includes
18810 - * Added fix from Linux 2.4 in serial.c (flush_to_flip_buffer)
18811 - *
18812 - * Revision 1.7  2002/11/26 09:41:42  starvik
18813 - * Added e100_set_config (standard interface to set media type)
18814 - * Added protection against preemptive scheduling
18815 - * Added standard MII ioctls
18816 - *
18817 - * Revision 1.6  2002/11/21 07:18:18  starvik
18818 - * Timers must be initialized in 2.5.48
18819 - *
18820 - * Revision 1.5  2002/11/20 11:56:11  starvik
18821 - * Merge of Linux 2.5.48
18822 - *
18823 - * Revision 1.4  2002/11/18 07:26:46  starvik
18824 - * Linux 2.5 port of latest Linux 2.4 ethernet driver
18825 - *
18826 - * Revision 1.33  2002/10/02 20:16:17  hp
18827 - * SETF, SETS: Use underscored IO_x_ macros rather than incorrect token concatenation
18828 - *
18829 - * Revision 1.32  2002/09/16 06:05:58  starvik
18830 - * Align memory returned by dev_alloc_skb
18831 - * Moved handling of sent packets to interrupt to avoid reference counting problem
18832 - *
18833 - * Revision 1.31  2002/09/10 13:28:23  larsv
18834 - * Return -EINVAL for unknown ioctls to avoid confusing tools that tests
18835 - * for supported functionality by issuing special ioctls, i.e. wireless
18836 - * extensions.
18837 - *
18838 - * Revision 1.30  2002/05/07 18:50:08  johana
18839 - * Correct spelling in comments.
18840 - *
18841 - * Revision 1.29  2002/05/06 05:38:49  starvik
18842 - * Performance improvements:
18843 - *    Large packets are not copied (breakpoint set to 256 bytes)
18844 - *    The cache bug workaround is delayed until half of the receive list
18845 - *      has been used
18846 - *    Added transmit list
18847 - *    Transmit interrupts are only enabled when transmit queue is full
18848 - *
18849 - * Revision 1.28.2.1  2002/04/30 08:15:51  starvik
18850 - * Performance improvements:
18851 - *   Large packets are not copied (breakpoint set to 256 bytes)
18852 - *   The cache bug workaround is delayed until half of the receive list
18853 - *     has been used.
18854 - *   Added transmit list
18855 - *   Transmit interrupts are only enabled when transmit queue is full
18856 - *
18857 - * Revision 1.28  2002/04/22 11:47:21  johana
18858 - * Fix according to 2.4.19-pre7. time_after/time_before and
18859 - * missing end of comment.
18860 - * The patch has a typo for ethernet.c in e100_clear_network_leds(),
18861 - *  that is fixed here.
18862 - *
18863 - * Revision 1.27  2002/04/12 11:55:11  bjornw
18864 - * Added TODO
18865 - *
18866 - * Revision 1.26  2002/03/15 17:11:02  bjornw
18867 - * Use prepare_rx_descriptor after the CPU has touched the receiving descs
18868 - *
18869 - * Revision 1.25  2002/03/08 13:07:53  bjornw
18870 - * Unnecessary spinlock removed
18871 - *
18872 - * Revision 1.24  2002/02/20 12:57:43  fredriks
18873 - * Replaced MIN() with min().
18874 - *
18875 - * Revision 1.23  2002/02/20 10:58:14  fredriks
18876 - * Strip the Ethernet checksum (4 bytes) before forwarding a frame to upper layers.
18877 - *
18878 - * Revision 1.22  2002/01/30 07:48:22  matsfg
18879 - * Initiate R_NETWORK_TR_CTRL
18880 - *
18881 - * Revision 1.21  2001/11/23 11:54:49  starvik
18882 - * Added IFF_PROMISC and IFF_ALLMULTI handling in set_multicast_list
18883 - * Removed compiler warnings
18884 - *
18885 - * Revision 1.20  2001/11/12 19:26:00  pkj
18886 - * * Corrected e100_negotiate() to not assign half to current_duplex when
18887 - *   it was supposed to compare them...
18888 - * * Cleaned up failure handling in e100_open().
18889 - * * Fixed compiler warnings.
18890 - *
18891 - * Revision 1.19  2001/11/09 07:43:09  starvik
18892 - * Added full duplex support
18893 - * Added ioctl to set speed and duplex
18894 - * Clear LED timer only runs when LED is lit
18895 - *
18896 - * Revision 1.18  2001/10/03 14:40:43  jonashg
18897 - * Update rx_bytes counter.
18898 - *
18899 - * Revision 1.17  2001/06/11 12:43:46  olof
18900 - * Modified defines for network LED behavior
18901 - *
18902 - * Revision 1.16  2001/05/30 06:12:46  markusl
18903 - * TxDesc.next should not be set to NULL
18904 - *
18905 - * Revision 1.15  2001/05/29 10:27:04  markusl
18906 - * Updated after review remarks:
18907 - * +Use IO_EXTRACT
18908 - * +Handle underrun
18909 - *
18910 - * Revision 1.14  2001/05/29 09:20:14  jonashg
18911 - * Use driver name on printk output so one can tell which driver that complains.
18912 - *
18913 - * Revision 1.13  2001/05/09 12:35:59  johana
18914 - * Use DMA_NBR and IRQ_NBR defines from dma.h and irq.h
18915 - *
18916 - * Revision 1.12  2001/04/05 11:43:11  tobiasa
18917 - * Check dev before panic.
18918 - *
18919 - * Revision 1.11  2001/04/04 11:21:05  markusl
18920 - * Updated according to review remarks
18921 - *
18922 - * Revision 1.10  2001/03/26 16:03:06  bjornw
18923 - * Needs linux/config.h
18924 - *
18925 - * Revision 1.9  2001/03/19 14:47:48  pkj
18926 - * * Make sure there is always a pause after the network LEDs are
18927 - *   changed so they will not look constantly lit during heavy traffic.
18928 - * * Always use HZ when setting times relative to jiffies.
18929 - * * Use LED_NETWORK_SET() when setting the network LEDs.
18930 - *
18931 - * Revision 1.8  2001/02/27 13:52:48  bjornw
18932 - * malloc.h -> slab.h
18933 - *
18934 - * Revision 1.7  2001/02/23 13:46:38  bjornw
18935 - * Spellling check
18936 - *
18937 - * Revision 1.6  2001/01/26 15:21:04  starvik
18938 - * Don't disable interrupts while reading MDIO registers (MDIO is slow)
18939 - * Corrected promiscuous mode
18940 - * Improved deallocation of IRQs ("ifconfig eth0 down" now works)
18941 - *
18942 - * Revision 1.5  2000/11/29 17:22:22  bjornw
18943 - * Get rid of the udword types legacy stuff
18944 - *
18945 - * Revision 1.4  2000/11/22 16:36:09  bjornw
18946 - * Please marketing by using the correct case when spelling Etrax.
18947 - *
18948 - * Revision 1.3  2000/11/21 16:43:04  bjornw
18949 - * Minor short->int change
18950 - *
18951 - * Revision 1.2  2000/11/08 14:27:57  bjornw
18952 - * 2.4 port
18953 - *
18954 - * Revision 1.1  2000/11/06 13:56:00  bjornw
18955 - * Verbatim copy of the 1.24 version of e100net.c from elinux
18956 - *
18957 - * Revision 1.24  2000/10/04 15:55:23  bjornw
18958 - * * Use virt_to_phys etc. for DMA addresses
18959 - * * Removed bogus CHECKSUM_UNNECESSARY
18960 - *
18961 - *
18962   */
18963  
18964  
18965 @@ -251,6 +40,7 @@
18966  #include <asm/bitops.h>
18967  #include <asm/ethernet.h>
18968  #include <asm/cache.h>
18969 +#include <asm/arch/io_interface_mux.h>
18970  
18971  //#define ETHDEBUG
18972  #define D(x)
18973 @@ -280,6 +70,9 @@
18974          * by this lock as well.
18975          */
18976         spinlock_t lock;
18977 +
18978 +       spinlock_t led_lock; /* Protect LED state */
18979 +       spinlock_t transceiver_lock; /* Protect transceiver state. */
18980  };
18981  
18982  typedef struct etrax_eth_descr
18983 @@ -296,8 +89,6 @@
18984         void (*check_duplex)(struct net_device* dev);
18985  };
18986  
18987 -struct transceiver_ops* transceiver;
18988 -
18989  /* Duplex settings */
18990  enum duplex
18991  {
18992 @@ -308,7 +99,7 @@
18993  
18994  /* Dma descriptors etc. */
18995  
18996 -#define MAX_MEDIA_DATA_SIZE 1518
18997 +#define MAX_MEDIA_DATA_SIZE 1522
18998  
18999  #define MIN_PACKET_LEN      46
19000  #define ETHER_HEAD_LEN      14
19001 @@ -332,9 +123,9 @@
19002  #define MDIO_TDK_DIAGNOSTIC_DPLX          0x800
19003  
19004  /*Intel LXT972A specific*/
19005 -#define MDIO_INT_STATUS_REG_2                  0x0011
19006 -#define MDIO_INT_FULL_DUPLEX_IND               ( 1 << 9 )
19007 -#define MDIO_INT_SPEED                         ( 1 << 14 )
19008 +#define MDIO_INT_STATUS_REG_2            0x0011
19009 +#define MDIO_INT_FULL_DUPLEX_IND       (1 << 9)
19010 +#define MDIO_INT_SPEED                (1 << 14)
19011  
19012  /* Network flash constants */
19013  #define NET_FLASH_TIME                  (HZ/50) /* 20 ms */
19014 @@ -345,8 +136,8 @@
19015  #define NO_NETWORK_ACTIVITY 0
19016  #define NETWORK_ACTIVITY    1
19017  
19018 -#define NBR_OF_RX_DESC     64
19019 -#define NBR_OF_TX_DESC     256
19020 +#define NBR_OF_RX_DESC     32
19021 +#define NBR_OF_TX_DESC     16
19022  
19023  /* Large packets are sent directly to upper layers while small packets are */
19024  /* copied (to reduce memory waste). The following constant decides the breakpoint */
19025 @@ -368,7 +159,6 @@
19026  static etrax_eth_descr *myNextRxDesc;  /* Points to the next descriptor to
19027                                            to be processed */
19028  static etrax_eth_descr *myLastRxDesc;  /* The last processed descriptor */
19029 -static etrax_eth_descr *myPrevRxDesc;  /* The descriptor right before myNextRxDesc */
19030  
19031  static etrax_eth_descr RxDescList[NBR_OF_RX_DESC] __attribute__ ((aligned(32)));
19032  
19033 @@ -378,7 +168,6 @@
19034  static etrax_eth_descr TxDescList[NBR_OF_TX_DESC] __attribute__ ((aligned(32)));
19035  
19036  static unsigned int network_rec_config_shadow = 0;
19037 -static unsigned int mdio_phy_addr; /* Transciever address */
19038  
19039  static unsigned int network_tr_ctrl_shadow = 0;
19040  
19041 @@ -412,7 +201,7 @@
19042  static void e100_tx_timeout(struct net_device *dev);
19043  static struct net_device_stats *e100_get_stats(struct net_device *dev);
19044  static void set_multicast_list(struct net_device *dev);
19045 -static void e100_hardware_send_packet(char *buf, int length);
19046 +static void e100_hardware_send_packet(struct net_local* np, char *buf, int length);
19047  static void update_rx_stats(struct net_device_stats *);
19048  static void update_tx_stats(struct net_device_stats *);
19049  static int e100_probe_transceiver(struct net_device* dev);
19050 @@ -435,7 +224,10 @@
19051  static void e100_set_network_leds(int active);
19052  
19053  static const struct ethtool_ops e100_ethtool_ops;
19054 -
19055 +#if defined(CONFIG_ETRAX_NO_PHY)
19056 +static void dummy_check_speed(struct net_device* dev);
19057 +static void dummy_check_duplex(struct net_device* dev);
19058 +#else
19059  static void broadcom_check_speed(struct net_device* dev);
19060  static void broadcom_check_duplex(struct net_device* dev);
19061  static void tdk_check_speed(struct net_device* dev);
19062 @@ -444,16 +236,29 @@
19063  static void intel_check_duplex(struct net_device* dev);
19064  static void generic_check_speed(struct net_device* dev);
19065  static void generic_check_duplex(struct net_device* dev);
19066 +#endif
19067 +#ifdef CONFIG_NET_POLL_CONTROLLER
19068 +static void e100_netpoll(struct net_device* dev);
19069 +#endif
19070 +
19071 +static int autoneg_normal = 1;
19072  
19073  struct transceiver_ops transceivers[] =
19074  {
19075 +#if defined(CONFIG_ETRAX_NO_PHY)
19076 +       {0x0000, dummy_check_speed, dummy_check_duplex}        /* Dummy */
19077 +#else
19078         {0x1018, broadcom_check_speed, broadcom_check_duplex},  /* Broadcom */
19079         {0xC039, tdk_check_speed, tdk_check_duplex},            /* TDK 2120 */
19080         {0x039C, tdk_check_speed, tdk_check_duplex},            /* TDK 2120C */
19081 -        {0x04de, intel_check_speed, intel_check_duplex},       /* Intel LXT972A*/
19082 +       {0x04de, intel_check_speed, intel_check_duplex},        /* Intel LXT972A*/
19083         {0x0000, generic_check_speed, generic_check_duplex}     /* Generic, must be last */
19084 +#endif
19085  };
19086  
19087 +struct transceiver_ops* transceiver = &transceivers[0];
19088 +static unsigned int mdio_phy_addr = 0; /* PHY address on MDIO bus */
19089 +
19090  #define tx_done(dev) (*R_DMA_CH0_CMD == 0)
19091  
19092  /*
19093 @@ -468,18 +273,26 @@
19094  etrax_ethernet_init(void)
19095  {
19096         struct net_device *dev;
19097 -        struct net_local* np;
19098 +       struct net_local* np;
19099         int i, err;
19100  
19101         printk(KERN_INFO
19102 -              "ETRAX 100LX 10/100MBit ethernet v2.0 (c) 2000-2003 Axis Communications AB\n");
19103 -
19104 +              "ETRAX 100LX 10/100MBit ethernet v2.0 (c) 1998-2006 Axis Communications AB\n");
19105 +       
19106 +       if (cris_request_io_interface(if_eth, cardname)) {
19107 +               printk(KERN_CRIT "etrax_ethernet_init failed to get IO interface\n");
19108 +               return -EBUSY;
19109 +       }
19110 +       
19111         dev = alloc_etherdev(sizeof(struct net_local));
19112 -       np = dev->priv;
19113 -
19114         if (!dev)
19115                 return -ENOMEM;
19116 +       
19117 +       np = netdev_priv(dev);
19118  
19119 +       /* we do our own locking */
19120 +       dev->features |= NETIF_F_LLTX;
19121 +               
19122         dev->base_addr = (unsigned int)R_NETWORK_SA_0; /* just to have something to show */
19123  
19124         /* now setup our etrax specific stuff */
19125 @@ -495,18 +308,26 @@
19126         dev->get_stats          = e100_get_stats;
19127         dev->set_multicast_list = set_multicast_list;
19128         dev->set_mac_address    = e100_set_mac_address;
19129 -       dev->ethtool_ops        = &e100_ethtool_ops;
19130 +       dev->ethtool_ops        = &e100_ethtool_ops;
19131         dev->do_ioctl           = e100_ioctl;
19132 -       dev->set_config         = e100_set_config;
19133 +       dev->set_config         = e100_set_config;
19134         dev->tx_timeout         = e100_tx_timeout;
19135 +#ifdef CONFIG_NET_POLL_CONTROLLER
19136 +       dev->poll_controller = e100_netpoll;
19137 +#endif
19138 +
19139 +       spin_lock_init(&np->lock);
19140 +       spin_lock_init(&np->led_lock);
19141 +       spin_lock_init(&np->transceiver_lock);
19142  
19143         /* Initialise the list of Etrax DMA-descriptors */
19144  
19145         /* Initialise receive descriptors */
19146  
19147         for (i = 0; i < NBR_OF_RX_DESC; i++) {
19148 -               /* Allocate two extra cachelines to make sure that buffer used by DMA
19149 -                * does not share cacheline with any other data (to avoid cache bug)
19150 +               /* Allocate two extra cachelines to make sure that buffer used
19151 +                * by DMA does not share cacheline with any other data (to
19152 +                * avoid cache bug)
19153                  */
19154                 RxDescList[i].skb = dev_alloc_skb(MAX_MEDIA_DATA_SIZE + 2 * L1_CACHE_BYTES);
19155                 if (!RxDescList[i].skb)
19156 @@ -517,6 +338,7 @@
19157                 RxDescList[i].descr.buf    = L1_CACHE_ALIGN(virt_to_phys(RxDescList[i].skb->data));
19158                 RxDescList[i].descr.status = 0;
19159                 RxDescList[i].descr.hw_len = 0;
19160 +
19161                 prepare_rx_descriptor(&RxDescList[i].descr);
19162         }
19163  
19164 @@ -542,7 +364,6 @@
19165  
19166         myNextRxDesc  = &RxDescList[0];
19167         myLastRxDesc  = &RxDescList[NBR_OF_RX_DESC - 1];
19168 -       myPrevRxDesc  = &RxDescList[NBR_OF_RX_DESC - 1];
19169         myFirstTxDesc = &TxDescList[0];
19170         myNextTxDesc  = &TxDescList[0];
19171         myLastTxDesc  = &TxDescList[NBR_OF_TX_DESC - 1];
19172 @@ -563,18 +384,19 @@
19173         current_speed = 10;
19174         current_speed_selection = 0; /* Auto */
19175         speed_timer.expires = jiffies + NET_LINK_UP_CHECK_INTERVAL;
19176 -        duplex_timer.data = (unsigned long)dev;
19177 +       speed_timer.data = (unsigned long)dev;
19178         speed_timer.function = e100_check_speed;
19179  
19180         clear_led_timer.function = e100_clear_network_leds;
19181 +       clear_led_timer.data = (unsigned long)dev;
19182  
19183         full_duplex = 0;
19184         current_duplex = autoneg;
19185         duplex_timer.expires = jiffies + NET_DUPLEX_CHECK_INTERVAL;
19186 -        duplex_timer.data = (unsigned long)dev;
19187 +       duplex_timer.data = (unsigned long)dev;
19188         duplex_timer.function = e100_check_duplex;
19189  
19190 -        /* Initialize mii interface */
19191 +       /* Initialize mii interface */
19192         np->mii_if.phy_id = mdio_phy_addr;
19193         np->mii_if.phy_id_mask = 0x1f;
19194         np->mii_if.reg_num_mask = 0x1f;
19195 @@ -586,6 +408,9 @@
19196         /* unwanted addresses are matched */
19197         *R_NETWORK_GA_0 = 0x00000000;
19198         *R_NETWORK_GA_1 = 0x00000000;
19199 +
19200 +       /* Initialize next time the led can flash */
19201 +       led_next_time = jiffies;
19202         return 0;
19203  }
19204  
19205 @@ -596,7 +421,7 @@
19206  static int
19207  e100_set_mac_address(struct net_device *dev, void *p)
19208  {
19209 -       struct net_local *np = (struct net_local *)dev->priv;
19210 +       struct net_local *np = netdev_priv(dev);
19211         struct sockaddr *addr = p;
19212         int i;
19213  
19214 @@ -680,17 +505,36 @@
19215         /* allocate the irq corresponding to the transmitting DMA */
19216  
19217         if (request_irq(NETWORK_DMA_TX_IRQ_NBR, e100rxtx_interrupt, 0,
19218 -                       cardname, (void *)dev)) {
19219 +                       cardname, (void *)dev)) {
19220                 goto grace_exit1;
19221         }
19222  
19223         /* allocate the irq corresponding to the network errors etc */
19224  
19225         if (request_irq(NETWORK_STATUS_IRQ_NBR, e100nw_interrupt, 0,
19226 -                       cardname, (void *)dev)) {
19227 +                       cardname, (void *)dev)) {
19228                 goto grace_exit2;
19229         }
19230  
19231 +       /*
19232 +        * Always allocate the DMA channels after the IRQ,
19233 +        * and clean up on failure.
19234 +        */
19235 +
19236 +       if (cris_request_dma(NETWORK_TX_DMA_NBR,
19237 +                            cardname,
19238 +                            DMA_VERBOSE_ON_ERROR,
19239 +                            dma_eth)) {
19240 +               goto grace_exit3;
19241 +        }
19242 +
19243 +       if (cris_request_dma(NETWORK_RX_DMA_NBR,
19244 +                            cardname,
19245 +                            DMA_VERBOSE_ON_ERROR,
19246 +                            dma_eth)) {
19247 +               goto grace_exit4;
19248 +        }
19249 +
19250         /* give the HW an idea of what MAC address we want */
19251  
19252         *R_NETWORK_SA_0 = dev->dev_addr[0] | (dev->dev_addr[1] << 8) |
19253 @@ -705,6 +549,7 @@
19254  
19255         *R_NETWORK_REC_CONFIG = 0xd; /* broadcast rec, individ. rec, ma0 enabled */
19256  #else
19257 +       SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, max_size, size1522);
19258         SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, broadcast, receive);
19259         SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, ma0, enable);
19260         SETF(network_rec_config_shadow, R_NETWORK_REC_CONFIG, duplex, full_duplex);
19261 @@ -724,8 +569,7 @@
19262         SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, crc, enable);
19263         *R_NETWORK_TR_CTRL = network_tr_ctrl_shadow;
19264  
19265 -       save_flags(flags);
19266 -       cli();
19267 +       local_irq_save(flags);
19268  
19269         /* enable the irq's for ethernet DMA */
19270  
19271 @@ -757,12 +601,13 @@
19272  
19273         *R_DMA_CH0_FIRST = 0;
19274         *R_DMA_CH0_DESCR = virt_to_phys(myLastTxDesc);
19275 +       netif_start_queue(dev);
19276  
19277 -       restore_flags(flags);
19278 +       local_irq_restore(flags);
19279  
19280         /* Probe for transceiver */
19281         if (e100_probe_transceiver(dev))
19282 -               goto grace_exit3;
19283 +               goto grace_exit5;
19284  
19285         /* Start duplex/speed timers */
19286         add_timer(&speed_timer);
19287 @@ -771,10 +616,14 @@
19288         /* We are now ready to accept transmit requeusts from
19289          * the queueing layer of the networking.
19290          */
19291 -       netif_start_queue(dev);
19292 +       netif_carrier_on(dev);
19293  
19294         return 0;
19295  
19296 +grace_exit5:
19297 +       cris_free_dma(NETWORK_RX_DMA_NBR, cardname);
19298 +grace_exit4:
19299 +       cris_free_dma(NETWORK_TX_DMA_NBR, cardname);
19300  grace_exit3:
19301         free_irq(NETWORK_STATUS_IRQ_NBR, (void *)dev);
19302  grace_exit2:
19303 @@ -785,7 +634,13 @@
19304         return -EAGAIN;
19305  }
19306  
19307 -
19308 +#if defined(CONFIG_ETRAX_NO_PHY)
19309 +static void
19310 +dummy_check_speed(struct net_device* dev)
19311 +{
19312 +       current_speed = 100;
19313 +}
19314 +#else
19315  static void
19316  generic_check_speed(struct net_device* dev)
19317  {
19318 @@ -821,15 +676,18 @@
19319         data = e100_get_mdio_reg(dev, mdio_phy_addr, MDIO_INT_STATUS_REG_2);
19320         current_speed = (data & MDIO_INT_SPEED ? 100 : 10);
19321  }
19322 -
19323 +#endif
19324  static void
19325  e100_check_speed(unsigned long priv)
19326  {
19327         struct net_device* dev = (struct net_device*)priv;
19328 +       struct net_local *np = netdev_priv(dev);
19329         static int led_initiated = 0;
19330         unsigned long data;
19331         int old_speed = current_speed;
19332  
19333 +       spin_lock(&np->transceiver_lock);
19334 +
19335         data = e100_get_mdio_reg(dev, mdio_phy_addr, MII_BMSR);
19336         if (!(data & BMSR_LSTATUS)) {
19337                 current_speed = 0;
19338 @@ -837,14 +695,22 @@
19339                 transceiver->check_speed(dev);
19340         }
19341  
19342 +       spin_lock(&np->led_lock);
19343         if ((old_speed != current_speed) || !led_initiated) {
19344                 led_initiated = 1;
19345                 e100_set_network_leds(NO_NETWORK_ACTIVITY);
19346 +               if (current_speed)
19347 +                       netif_carrier_on(dev);
19348 +               else
19349 +                       netif_carrier_off(dev);
19350         }
19351 +       spin_unlock(&np->led_lock);
19352  
19353         /* Reinitialize the timer. */
19354         speed_timer.expires = jiffies + NET_LINK_UP_CHECK_INTERVAL;
19355         add_timer(&speed_timer);
19356 +
19357 +       spin_unlock(&np->transceiver_lock);
19358  }
19359  
19360  static void
19361 @@ -857,7 +723,7 @@
19362                   ADVERTISE_10HALF | ADVERTISE_10FULL);
19363  
19364         switch (current_speed_selection) {
19365 -               case 10 :
19366 +               case 10:
19367                         if (current_duplex == full)
19368                                 data |= ADVERTISE_10FULL;
19369                         else if (current_duplex == half)
19370 @@ -866,7 +732,7 @@
19371                                 data |= ADVERTISE_10HALF | ADVERTISE_10FULL;
19372                         break;
19373  
19374 -               case 100 :
19375 +               case 100:
19376                          if (current_duplex == full)
19377                                 data |= ADVERTISE_100FULL;
19378                         else if (current_duplex == half)
19379 @@ -875,45 +741,54 @@
19380                                 data |= ADVERTISE_100HALF | ADVERTISE_100FULL;
19381                         break;
19382  
19383 -               case 0 : /* Auto */
19384 +               case 0: /* Auto */
19385                          if (current_duplex == full)
19386                                 data |= ADVERTISE_100FULL | ADVERTISE_10FULL;
19387                         else if (current_duplex == half)
19388                                 data |= ADVERTISE_100HALF | ADVERTISE_10HALF;
19389                         else
19390                                 data |= ADVERTISE_10HALF | ADVERTISE_10FULL |
19391 -                                 ADVERTISE_100HALF | ADVERTISE_100FULL;
19392 +                                       ADVERTISE_100HALF | ADVERTISE_100FULL;
19393                         break;
19394  
19395 -               default : /* assume autoneg speed and duplex */
19396 +               default: /* assume autoneg speed and duplex */
19397                         data |= ADVERTISE_10HALF | ADVERTISE_10FULL |
19398 -                                 ADVERTISE_100HALF | ADVERTISE_100FULL;
19399 +                               ADVERTISE_100HALF | ADVERTISE_100FULL;
19400 +                       break;
19401         }
19402  
19403         e100_set_mdio_reg(dev, mdio_phy_addr, MII_ADVERTISE, data);
19404  
19405         /* Renegotiate with link partner */
19406 -       data = e100_get_mdio_reg(dev, mdio_phy_addr, MII_BMCR);
19407 -       data |= BMCR_ANENABLE | BMCR_ANRESTART;
19408 -
19409 +       if (autoneg_normal) {
19410 +         data = e100_get_mdio_reg(dev, mdio_phy_addr, MII_BMCR);
19411 +         data |= BMCR_ANENABLE | BMCR_ANRESTART;
19412 +       }
19413         e100_set_mdio_reg(dev, mdio_phy_addr, MII_BMCR, data);
19414  }
19415  
19416  static void
19417  e100_set_speed(struct net_device* dev, unsigned long speed)
19418  {
19419 +       struct net_local *np = netdev_priv(dev);
19420 +
19421 +       spin_lock(&np->transceiver_lock);
19422         if (speed != current_speed_selection) {
19423                 current_speed_selection = speed;
19424                 e100_negotiate(dev);
19425         }
19426 +       spin_unlock(&np->transceiver_lock);
19427  }
19428  
19429  static void
19430  e100_check_duplex(unsigned long priv)
19431  {
19432         struct net_device *dev = (struct net_device *)priv;
19433 -       struct net_local *np = (struct net_local *)dev->priv;
19434 -       int old_duplex = full_duplex;
19435 +       struct net_local *np = netdev_priv(dev);
19436 +       int old_duplex;
19437 +
19438 +       spin_lock(&np->transceiver_lock);
19439 +       old_duplex = full_duplex;
19440         transceiver->check_duplex(dev);
19441         if (old_duplex != full_duplex) {
19442                 /* Duplex changed */
19443 @@ -925,12 +800,20 @@
19444         duplex_timer.expires = jiffies + NET_DUPLEX_CHECK_INTERVAL;
19445         add_timer(&duplex_timer);
19446         np->mii_if.full_duplex = full_duplex;
19447 +       spin_unlock(&np->transceiver_lock);
19448  }
19449 -
19450 +#if defined(CONFIG_ETRAX_NO_PHY)
19451 +static void
19452 +dummy_check_duplex(struct net_device* dev)
19453 +{
19454 +       full_duplex = 1;
19455 +}
19456 +#else
19457  static void
19458  generic_check_duplex(struct net_device* dev)
19459  {
19460         unsigned long data;
19461 +
19462         data = e100_get_mdio_reg(dev, mdio_phy_addr, MII_ADVERTISE);
19463         if ((data & ADVERTISE_10FULL) ||
19464             (data & ADVERTISE_100FULL))
19465 @@ -943,6 +826,7 @@
19466  tdk_check_duplex(struct net_device* dev)
19467  {
19468         unsigned long data;
19469 +
19470         data = e100_get_mdio_reg(dev, mdio_phy_addr, MDIO_TDK_DIAGNOSTIC_REG);
19471         full_duplex = (data & MDIO_TDK_DIAGNOSTIC_DPLX) ? 1 : 0;
19472  }
19473 @@ -951,6 +835,7 @@
19474  broadcom_check_duplex(struct net_device* dev)
19475  {
19476         unsigned long data;
19477 +
19478         data = e100_get_mdio_reg(dev, mdio_phy_addr, MDIO_AUX_CTRL_STATUS_REG);
19479         full_duplex = (data & MDIO_BC_FULL_DUPLEX_IND) ? 1 : 0;
19480  }
19481 @@ -959,26 +844,35 @@
19482  intel_check_duplex(struct net_device* dev)
19483  {
19484         unsigned long data;
19485 +
19486         data = e100_get_mdio_reg(dev, mdio_phy_addr, MDIO_INT_STATUS_REG_2);
19487         full_duplex = (data & MDIO_INT_FULL_DUPLEX_IND) ? 1 : 0;
19488  }
19489 -
19490 +#endif
19491  static void
19492  e100_set_duplex(struct net_device* dev, enum duplex new_duplex)
19493  {
19494 +       struct net_local *np = netdev_priv(dev);
19495 +
19496 +       spin_lock(&np->transceiver_lock);
19497         if (new_duplex != current_duplex) {
19498                 current_duplex = new_duplex;
19499                 e100_negotiate(dev);
19500         }
19501 +       spin_unlock(&np->transceiver_lock);
19502  }
19503  
19504  static int
19505  e100_probe_transceiver(struct net_device* dev)
19506  {
19507 +#if !defined(CONFIG_ETRAX_NO_PHY)
19508         unsigned int phyid_high;
19509         unsigned int phyid_low;
19510         unsigned int oui;
19511         struct transceiver_ops* ops = NULL;
19512 +       struct net_local *np = netdev_priv(dev);
19513 +
19514 +       spin_lock(&np->transceiver_lock);
19515  
19516         /* Probe MDIO physical address */
19517         for (mdio_phy_addr = 0; mdio_phy_addr <= 31; mdio_phy_addr++) {
19518 @@ -986,7 +880,7 @@
19519                         break;
19520         }
19521         if (mdio_phy_addr == 32)
19522 -                return -ENODEV;
19523 +               return -ENODEV;
19524  
19525         /* Get manufacturer */
19526         phyid_high = e100_get_mdio_reg(dev, mdio_phy_addr, MII_PHYSID1);
19527 @@ -999,6 +893,8 @@
19528         }
19529         transceiver = ops;
19530  
19531 +       spin_unlock(&np->transceiver_lock);
19532 +#endif
19533         return 0;
19534  }
19535  
19536 @@ -1006,7 +902,7 @@
19537  e100_get_mdio_reg(struct net_device *dev, int phy_id, int location)
19538  {
19539         unsigned short cmd;    /* Data to be sent on MDIO port */
19540 -       int data;   /* Data read from MDIO */
19541 +       int data;              /* Data read from MDIO */
19542         int bitCounter;
19543  
19544         /* Start of frame, OP Code, Physical Address, Register Address */
19545 @@ -1082,6 +978,7 @@
19546  e100_receive_mdio_bit()
19547  {
19548         unsigned char bit;
19549 +
19550         *R_NETWORK_MGM_CTRL = 0;
19551         bit = IO_EXTRACT(R_NETWORK_STAT, mdio, *R_NETWORK_STAT);
19552         udelay(1);
19553 @@ -1117,7 +1014,7 @@
19554  static void
19555  e100_tx_timeout(struct net_device *dev)
19556  {
19557 -       struct net_local *np = (struct net_local *)dev->priv;
19558 +       struct net_local *np = netdev_priv(dev);
19559         unsigned long flags;
19560  
19561         spin_lock_irqsave(&np->lock, flags);
19562 @@ -1139,8 +1036,7 @@
19563         e100_reset_transceiver(dev);
19564  
19565         /* and get rid of the packets that never got an interrupt */
19566 -       while (myFirstTxDesc != myNextTxDesc)
19567 -       {
19568 +       while (myFirstTxDesc != myNextTxDesc) {
19569                 dev_kfree_skb(myFirstTxDesc->skb);
19570                 myFirstTxDesc->skb = 0;
19571                 myFirstTxDesc = phys_to_virt(myFirstTxDesc->descr.next);
19572 @@ -1166,7 +1062,7 @@
19573  static int
19574  e100_send_packet(struct sk_buff *skb, struct net_device *dev)
19575  {
19576 -       struct net_local *np = (struct net_local *)dev->priv;
19577 +       struct net_local *np = netdev_priv(dev);
19578         unsigned char *buf = skb->data;
19579         unsigned long flags;
19580  
19581 @@ -1179,7 +1075,7 @@
19582  
19583         dev->trans_start = jiffies;
19584  
19585 -       e100_hardware_send_packet(buf, skb->len);
19586 +       e100_hardware_send_packet(np, buf, skb->len);
19587  
19588         myNextTxDesc = phys_to_virt(myNextTxDesc->descr.next);
19589  
19590 @@ -1202,13 +1098,15 @@
19591  e100rxtx_interrupt(int irq, void *dev_id)
19592  {
19593         struct net_device *dev = (struct net_device *)dev_id;
19594 -       struct net_local *np = (struct net_local *)dev->priv;
19595 -       unsigned long irqbits = *R_IRQ_MASK2_RD;
19596 +       struct net_local *np = netdev_priv(dev);
19597 +       unsigned long irqbits;
19598  
19599 -       /* Disable RX/TX IRQs to avoid reentrancy */
19600 -       *R_IRQ_MASK2_CLR =
19601 -         IO_STATE(R_IRQ_MASK2_CLR, dma0_eop, clr) |
19602 -         IO_STATE(R_IRQ_MASK2_CLR, dma1_eop, clr);
19603 +       /* 
19604 +        * Note that both rx and tx interrupts are blocked at this point, 
19605 +        * regardless of which got us here.
19606 +        */
19607 +       
19608 +       irqbits = *R_IRQ_MASK2_RD;
19609  
19610         /* Handle received packets */
19611         if (irqbits & IO_STATE(R_IRQ_MASK2_RD, dma1_eop, active)) {
19612 @@ -1224,7 +1122,7 @@
19613                          * allocate a new buffer to put a packet in.
19614                          */
19615                         e100_rx(dev);
19616 -                       ((struct net_local *)dev->priv)->stats.rx_packets++;
19617 +                       np->stats.rx_packets++;
19618                         /* restart/continue on the channel, for safety */
19619                         *R_DMA_CH1_CMD = IO_STATE(R_DMA_CH1_CMD, cmd, restart);
19620                         /* clear dma channel 1 eop/descr irq bits */
19621 @@ -1239,8 +1137,7 @@
19622  
19623         /* Report any packets that have been sent */
19624         while (myFirstTxDesc != phys_to_virt(*R_DMA_CH0_FIRST) &&
19625 -              myFirstTxDesc != myNextTxDesc)
19626 -       {
19627 +              (netif_queue_stopped(dev) || myFirstTxDesc != myNextTxDesc)) {
19628                 np->stats.tx_bytes += myFirstTxDesc->skb->len;
19629                 np->stats.tx_packets++;
19630  
19631 @@ -1249,19 +1146,15 @@
19632                 dev_kfree_skb_irq(myFirstTxDesc->skb);
19633                 myFirstTxDesc->skb = 0;
19634                 myFirstTxDesc = phys_to_virt(myFirstTxDesc->descr.next);
19635 +                /* Wake up queue. */
19636 +               netif_wake_queue(dev);
19637         }
19638  
19639         if (irqbits & IO_STATE(R_IRQ_MASK2_RD, dma0_eop, active)) {
19640 -               /* acknowledge the eop interrupt and wake up queue */
19641 +               /* acknowledge the eop interrupt. */
19642                 *R_DMA_CH0_CLR_INTR = IO_STATE(R_DMA_CH0_CLR_INTR, clr_eop, do);
19643 -               netif_wake_queue(dev);
19644         }
19645  
19646 -       /* Enable RX/TX IRQs again */
19647 -       *R_IRQ_MASK2_SET =
19648 -         IO_STATE(R_IRQ_MASK2_SET, dma0_eop, set) |
19649 -         IO_STATE(R_IRQ_MASK2_SET, dma1_eop, set);
19650 -
19651         return IRQ_HANDLED;
19652  }
19653  
19654 @@ -1269,7 +1162,7 @@
19655  e100nw_interrupt(int irq, void *dev_id)
19656  {
19657         struct net_device *dev = (struct net_device *)dev_id;
19658 -       struct net_local *np = (struct net_local *)dev->priv;
19659 +       struct net_local *np = netdev_priv(dev);
19660         unsigned long irqbits = *R_IRQ_MASK0_RD;
19661  
19662         /* check for underrun irq */
19663 @@ -1291,7 +1184,6 @@
19664                 SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, clr);
19665                 *R_NETWORK_TR_CTRL = network_tr_ctrl_shadow;
19666                 SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, nop);
19667 -               *R_NETWORK_TR_CTRL = IO_STATE(R_NETWORK_TR_CTRL, clr_error, clr);
19668                 np->stats.tx_errors++;
19669                 D(printk("ethernet excessive collisions!\n"));
19670         }
19671 @@ -1304,12 +1196,13 @@
19672  {
19673         struct sk_buff *skb;
19674         int length = 0;
19675 -       struct net_local *np = (struct net_local *)dev->priv;
19676 +       struct net_local *np = netdev_priv(dev);
19677         unsigned char *skb_data_ptr;
19678  #ifdef ETHDEBUG
19679         int i;
19680  #endif
19681 -
19682 +       etrax_eth_descr *prevRxDesc;  /* The descriptor right before myNextRxDesc */
19683 +       spin_lock(&np->led_lock);
19684         if (!led_active && time_after(jiffies, led_next_time)) {
19685                 /* light the network leds depending on the current speed. */
19686                 e100_set_network_leds(NETWORK_ACTIVITY);
19687 @@ -1319,9 +1212,10 @@
19688                 led_active = 1;
19689                 mod_timer(&clear_led_timer, jiffies + HZ/10);
19690         }
19691 +       spin_unlock(&np->led_lock);
19692  
19693         length = myNextRxDesc->descr.hw_len - 4;
19694 -       ((struct net_local *)dev->priv)->stats.rx_bytes += length;
19695 +       np->stats.rx_bytes += length;
19696  
19697  #ifdef ETHDEBUG
19698         printk("Got a packet of length %d:\n", length);
19699 @@ -1341,7 +1235,7 @@
19700                 if (!skb) {
19701                         np->stats.rx_errors++;
19702                         printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
19703 -                       return;
19704 +                       goto update_nextrxdesc;
19705                 }
19706  
19707                 skb_put(skb, length - ETHER_HEAD_LEN);        /* allocate room for the packet body */
19708 @@ -1358,15 +1252,15 @@
19709         else {
19710                 /* Large packet, send directly to upper layers and allocate new
19711                  * memory (aligned to cache line boundary to avoid bug).
19712 -                * Before sending the skb to upper layers we must make sure that
19713 -                * skb->data points to the aligned start of the packet.
19714 +                * Before sending the skb to upper layers we must make sure
19715 +                * that skb->data points to the aligned start of the packet.
19716                  */
19717                 int align;
19718                 struct sk_buff *new_skb = dev_alloc_skb(MAX_MEDIA_DATA_SIZE + 2 * L1_CACHE_BYTES);
19719                 if (!new_skb) {
19720                         np->stats.rx_errors++;
19721                         printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
19722 -                       return;
19723 +                       goto update_nextrxdesc;
19724                 }
19725                 skb = myNextRxDesc->skb;
19726                 align = (int)phys_to_virt(myNextRxDesc->descr.buf) - (int)skb->data;
19727 @@ -1382,9 +1276,10 @@
19728         /* Send the packet to the upper layers */
19729         netif_rx(skb);
19730  
19731 +  update_nextrxdesc:
19732         /* Prepare for next packet */
19733         myNextRxDesc->descr.status = 0;
19734 -       myPrevRxDesc = myNextRxDesc;
19735 +       prevRxDesc = myNextRxDesc;
19736         myNextRxDesc = phys_to_virt(myNextRxDesc->descr.next);
19737  
19738         rx_queue_len++;
19739 @@ -1392,9 +1287,9 @@
19740         /* Check if descriptors should be returned */
19741         if (rx_queue_len == RX_QUEUE_THRESHOLD) {
19742                 flush_etrax_cache();
19743 -               myPrevRxDesc->descr.ctrl |= d_eol;
19744 +               prevRxDesc->descr.ctrl |= d_eol;
19745                 myLastRxDesc->descr.ctrl &= ~d_eol;
19746 -               myLastRxDesc = myPrevRxDesc;
19747 +               myLastRxDesc = prevRxDesc;
19748                 rx_queue_len = 0;
19749         }
19750  }
19751 @@ -1403,7 +1298,7 @@
19752  static int
19753  e100_close(struct net_device *dev)
19754  {
19755 -       struct net_local *np = (struct net_local *)dev->priv;
19756 +       struct net_local *np = netdev_priv(dev);
19757  
19758         printk(KERN_INFO "Closing %s.\n", dev->name);
19759  
19760 @@ -1431,6 +1326,9 @@
19761         free_irq(NETWORK_DMA_TX_IRQ_NBR, (void *)dev);
19762         free_irq(NETWORK_STATUS_IRQ_NBR, (void *)dev);
19763  
19764 +       cris_free_dma(NETWORK_TX_DMA_NBR, cardname);
19765 +       cris_free_dma(NETWORK_RX_DMA_NBR, cardname);
19766 +
19767         /* Update the statistics here. */
19768  
19769         update_rx_stats(&np->stats);
19770 @@ -1448,46 +1346,56 @@
19771  {
19772         struct mii_ioctl_data *data = if_mii(ifr);
19773         struct net_local *np = netdev_priv(dev);
19774 +       int ret = 0;
19775 +        int old_autoneg;
19776  
19777         spin_lock(&np->lock); /* Preempt protection */
19778         switch (cmd) {
19779 -               case SIOCGMIIPHY: /* Get PHY address */
19780 +               case SIOCGMIIPHY:               /* Get PHY address */
19781                         data->phy_id = mdio_phy_addr;
19782                         break;
19783 -               case SIOCGMIIREG: /* Read MII register */
19784 +               case SIOCGMIIREG:               /* Read MII register */
19785                         data->val_out = e100_get_mdio_reg(dev, mdio_phy_addr, data->reg_num);
19786                         break;
19787 -               case SIOCSMIIREG: /* Write MII register */
19788 +               case SIOCSMIIREG:               /* Write MII register */
19789                         e100_set_mdio_reg(dev, mdio_phy_addr, data->reg_num, data->val_in);
19790                         break;
19791 +
19792                 /* The ioctls below should be considered obsolete but are */
19793                 /* still present for compatability with old scripts/apps  */
19794 -               case SET_ETH_SPEED_10:                  /* 10 Mbps */
19795 +               case SET_ETH_SPEED_10:          /* 10 Mbps */
19796                         e100_set_speed(dev, 10);
19797                         break;
19798 -               case SET_ETH_SPEED_100:                /* 100 Mbps */
19799 +               case SET_ETH_SPEED_100:         /* 100 Mbps */
19800                         e100_set_speed(dev, 100);
19801                         break;
19802 -               case SET_ETH_SPEED_AUTO:              /* Auto negotiate speed */
19803 +               case SET_ETH_SPEED_AUTO:        /* Auto-negotiate speed */
19804                         e100_set_speed(dev, 0);
19805                         break;
19806 -               case SET_ETH_DUPLEX_HALF:              /* Half duplex. */
19807 +               case SET_ETH_DUPLEX_HALF:       /* Half duplex */
19808                         e100_set_duplex(dev, half);
19809                         break;
19810 -               case SET_ETH_DUPLEX_FULL:              /* Full duplex. */
19811 +               case SET_ETH_DUPLEX_FULL:       /* Full duplex */
19812                         e100_set_duplex(dev, full);
19813                         break;
19814 -               case SET_ETH_DUPLEX_AUTO:             /* Autonegotiate duplex*/
19815 +               case SET_ETH_DUPLEX_AUTO:       /* Auto-negotiate duplex */
19816                         e100_set_duplex(dev, autoneg);
19817                         break;
19818 +               case SET_ETH_AUTONEG:
19819 +                       old_autoneg = autoneg_normal;
19820 +                       autoneg_normal = *(int*)data;
19821 +                       if (autoneg_normal != old_autoneg)
19822 +                               e100_negotiate(dev);
19823 +                       break;
19824                 default:
19825 +                       spin_unlock(&np->lock);
19826                         return -EINVAL;
19827         }
19828         spin_unlock(&np->lock);
19829 -       return 0;
19830 +       return ret;
19831  }
19832  
19833 -static int e100_set_settings(struct net_device *dev,
19834 +static int e100_get_settings(struct net_device *dev,
19835                              struct ethtool_cmd *ecmd)
19836  {
19837         ecmd->supported = SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII |
19838 @@ -1565,7 +1473,8 @@
19839  static int
19840  e100_set_config(struct net_device *dev, struct ifmap *map)
19841  {
19842 -       struct net_local *np = (struct net_local *)dev->priv;
19843 +       struct net_local *np = netdev_priv(dev);
19844 +
19845         spin_lock(&np->lock); /* Preempt protection */
19846  
19847         switch(map->port) {
19848 @@ -1574,21 +1483,25 @@
19849                         e100_set_speed(dev, 0);
19850                         e100_set_duplex(dev, autoneg);
19851                         break;
19852 +
19853                 case IF_PORT_10BASET:
19854                         e100_set_speed(dev, 10);
19855                         e100_set_duplex(dev, autoneg);
19856                         break;
19857 +
19858                 case IF_PORT_100BASET:
19859                 case IF_PORT_100BASETX:
19860                         e100_set_speed(dev, 100);
19861                         e100_set_duplex(dev, autoneg);
19862                         break;
19863 +
19864                 case IF_PORT_100BASEFX:
19865                 case IF_PORT_10BASE2:
19866                 case IF_PORT_AUI:
19867                         spin_unlock(&np->lock);
19868                         return -EOPNOTSUPP;
19869                         break;
19870 +
19871                 default:
19872                         printk(KERN_ERR "%s: Invalid media selected", dev->name);
19873                         spin_unlock(&np->lock);
19874 @@ -1602,6 +1515,7 @@
19875  update_rx_stats(struct net_device_stats *es)
19876  {
19877         unsigned long r = *R_REC_COUNTERS;
19878 +
19879         /* update stats relevant to reception errors */
19880         es->rx_fifo_errors += IO_EXTRACT(R_REC_COUNTERS, congestion, r);
19881         es->rx_crc_errors += IO_EXTRACT(R_REC_COUNTERS, crc_error, r);
19882 @@ -1613,11 +1527,11 @@
19883  update_tx_stats(struct net_device_stats *es)
19884  {
19885         unsigned long r = *R_TR_COUNTERS;
19886 +
19887         /* update stats relevant to transmission errors */
19888         es->collisions +=
19889                 IO_EXTRACT(R_TR_COUNTERS, single_col, r) +
19890                 IO_EXTRACT(R_TR_COUNTERS, multiple_col, r);
19891 -       es->tx_errors += IO_EXTRACT(R_TR_COUNTERS, deferred, r);
19892  }
19893  
19894  /*
19895 @@ -1627,8 +1541,9 @@
19896  static struct net_device_stats *
19897  e100_get_stats(struct net_device *dev)
19898  {
19899 -       struct net_local *lp = (struct net_local *)dev->priv;
19900 +       struct net_local *lp = netdev_priv(dev);
19901         unsigned long flags;
19902 +
19903         spin_lock_irqsave(&lp->lock, flags);
19904  
19905         update_rx_stats(&lp->stats);
19906 @@ -1640,21 +1555,21 @@
19907  
19908  /*
19909   * Set or clear the multicast filter for this adaptor.
19910 - * num_addrs == -1     Promiscuous mode, receive all packets
19911 - * num_addrs == 0      Normal mode, clear multicast list
19912 - * num_addrs > 0       Multicast mode, receive normal and MC packets,
19913 - *                     and do best-effort filtering.
19914 + * num_addrs == -1      Promiscuous mode, receive all packets
19915 + * num_addrs == 0       Normal mode, clear multicast list
19916 + * num_addrs > 0        Multicast mode, receive normal and MC packets,
19917 + *                      and do best-effort filtering.
19918   */
19919  static void
19920  set_multicast_list(struct net_device *dev)
19921  {
19922 -       struct net_local *lp = (struct net_local *)dev->priv;
19923 +       struct net_local *lp = netdev_priv(dev);
19924         int num_addr = dev->mc_count;
19925         unsigned long int lo_bits;
19926         unsigned long int hi_bits;
19927 +
19928         spin_lock(&lp->lock);
19929 -       if (dev->flags & IFF_PROMISC)
19930 -       {
19931 +       if (dev->flags & IFF_PROMISC) {
19932                 /* promiscuous mode */
19933                 lo_bits = 0xfffffffful;
19934                 hi_bits = 0xfffffffful;
19935 @@ -1684,9 +1599,10 @@
19936                 struct dev_mc_list *dmi = dev->mc_list;
19937                 int i;
19938                 char *baddr;
19939 +
19940                 lo_bits = 0x00000000ul;
19941                 hi_bits = 0x00000000ul;
19942 -               for (i=0; i<num_addr; i++) {
19943 +               for (i = 0; i < num_addr; i++) {
19944                         /* Calculate the hash index for the GA registers */
19945  
19946                         hash_ix = 0;
19947 @@ -1713,8 +1629,7 @@
19948  
19949                         if (hash_ix >= 32) {
19950                                 hi_bits |= (1 << (hash_ix-32));
19951 -                       }
19952 -                       else {
19953 +                       } else {
19954                                 lo_bits |= (1 << hash_ix);
19955                         }
19956                         dmi = dmi->next;
19957 @@ -1729,10 +1644,11 @@
19958  }
19959  
19960  void
19961 -e100_hardware_send_packet(char *buf, int length)
19962 +e100_hardware_send_packet(struct net_local *np, char *buf, int length)
19963  {
19964         D(printk("e100 send pack, buf 0x%x len %d\n", buf, length));
19965  
19966 +       spin_lock(&np->led_lock);
19967         if (!led_active && time_after(jiffies, led_next_time)) {
19968                 /* light the network leds depending on the current speed. */
19969                 e100_set_network_leds(NETWORK_ACTIVITY);
19970 @@ -1742,15 +1658,16 @@
19971                 led_active = 1;
19972                 mod_timer(&clear_led_timer, jiffies + HZ/10);
19973         }
19974 +       spin_unlock(&np->led_lock);
19975  
19976         /* configure the tx dma descriptor */
19977         myNextTxDesc->descr.sw_len = length;
19978         myNextTxDesc->descr.ctrl = d_eop | d_eol | d_wait;
19979         myNextTxDesc->descr.buf = virt_to_phys(buf);
19980  
19981 -        /* Move end of list */
19982 -        myLastTxDesc->descr.ctrl &= ~d_eol;
19983 -        myLastTxDesc = myNextTxDesc;
19984 +       /* Move end of list */
19985 +       myLastTxDesc->descr.ctrl &= ~d_eol;
19986 +       myLastTxDesc = myNextTxDesc;
19987  
19988         /* Restart DMA channel */
19989         *R_DMA_CH0_CMD = IO_STATE(R_DMA_CH0_CMD, cmd, restart);
19990 @@ -1759,6 +1676,11 @@
19991  static void
19992  e100_clear_network_leds(unsigned long dummy)
19993  {
19994 +       struct net_device *dev = (struct net_device *)dummy;
19995 +       struct net_local *np = netdev_priv(dev);
19996 +
19997 +       spin_lock(&np->led_lock);
19998 +
19999         if (led_active && time_after(jiffies, led_next_time)) {
20000                 e100_set_network_leds(NO_NETWORK_ACTIVITY);
20001  
20002 @@ -1766,6 +1688,8 @@
20003                 led_next_time = jiffies + NET_FLASH_PAUSE;
20004                 led_active = 0;
20005         }
20006 +
20007 +       spin_unlock(&np->led_lock);
20008  }
20009  
20010  static void
20011 @@ -1786,19 +1710,25 @@
20012  #else
20013                 LED_NETWORK_SET(LED_OFF);
20014  #endif
20015 -       }
20016 -       else if (light_leds) {
20017 +       } else if (light_leds) {
20018                 if (current_speed == 10) {
20019                         LED_NETWORK_SET(LED_ORANGE);
20020                 } else {
20021                         LED_NETWORK_SET(LED_GREEN);
20022                 }
20023 -       }
20024 -       else {
20025 +       } else {
20026                 LED_NETWORK_SET(LED_OFF);
20027         }
20028  }
20029  
20030 +#ifdef CONFIG_NET_POLL_CONTROLLER
20031 +static void
20032 +e100_netpoll(struct net_device* netdev)
20033 +{
20034 +       e100rxtx_interrupt(NETWORK_DMA_TX_IRQ_NBR, netdev, NULL);
20035 +}
20036 +#endif
20037 +
20038  static int
20039  etrax_init_module(void)
20040  {
20041 diff -urN linux-2.6.19.2.orig/drivers/net/cris/eth_v32.c linux-2.6.19.2.dev/drivers/net/cris/eth_v32.c
20042 --- linux-2.6.19.2.orig/drivers/net/cris/eth_v32.c      1970-01-01 01:00:00.000000000 +0100
20043 +++ linux-2.6.19.2.dev/drivers/net/cris/eth_v32.c       2007-02-06 11:10:37.000000000 +0100
20044 @@ -0,0 +1,2305 @@
20045 +/*
20046 + * Driver for the ETRAX FS network controller.
20047 + *
20048 + * Copyright (c) 2003-2006 Axis Communications AB.
20049 + */
20050 +
20051 +#include <linux/module.h>
20052 +
20053 +#include <linux/kernel.h>
20054 +#include <linux/sched.h>
20055 +#include <linux/delay.h>
20056 +#include <linux/types.h>
20057 +#include <linux/fcntl.h>
20058 +#include <linux/interrupt.h>
20059 +#include <linux/ptrace.h>
20060 +#include <linux/ioport.h>
20061 +#include <linux/in.h>
20062 +#include <linux/slab.h>
20063 +#include <linux/string.h>
20064 +#include <linux/spinlock.h>
20065 +#include <linux/errno.h>
20066 +#include <linux/init.h>
20067 +#include <linux/cpufreq.h>
20068 +
20069 +#include <linux/netdevice.h>
20070 +#include <linux/etherdevice.h>
20071 +#include <linux/skbuff.h>
20072 +#include <linux/ethtool.h>
20073 +#include <linux/mii.h>
20074 +
20075 +#include <asm/io.h>            /* LED_* I/O functions */
20076 +#include <asm/irq.h>
20077 +#include <asm/arch/hwregs/reg_map.h>
20078 +#include <asm/arch/hwregs/reg_rdwr.h>
20079 +#include <asm/arch/hwregs/dma.h>
20080 +#include <asm/arch/hwregs/eth_defs.h>
20081 +#include <asm/arch/hwregs/config_defs.h>
20082 +#include <asm/arch/hwregs/intr_vect_defs.h>
20083 +#include <asm/system.h>
20084 +#include <asm/bitops.h>
20085 +#include <asm/ethernet.h>
20086 +#include <asm/arch/dma.h>
20087 +#include <asm/arch/intmem.h>
20088 +#include <asm/arch/pinmux.h>
20089 +
20090 +#include "eth_v32.h"
20091 +
20092 +#define DEBUG(x)
20093 +#define GET_BIT(bit,val)   (((val) >> (bit)) & 0x01)
20094 +
20095 +/* Toggle network LEDs on/off at runtime */
20096 +static int use_network_leds = 1;
20097 +
20098 +static void update_rx_stats(struct crisv32_ethernet_local *np);
20099 +static void update_tx_stats(struct crisv32_ethernet_local *np); 
20100 +static void crisv32_eth_setup_controller(struct net_device *dev);
20101 +static int  crisv32_eth_request_irqdma(struct net_device *dev);
20102 +static void crisv32_eth_init_rings(struct net_device *dev);
20103 +static void crisv32_eth_reset_rings(struct net_device *dev);
20104 +static void crisv32_ethernet_bug(struct net_device *dev);
20105 +
20106 +/*
20107 + * The name of the card. Is used for messages and in the requests for
20108 + * io regions, irqs and dma channels.
20109 + */
20110 +static const char *cardname = "ETRAX FS built-in ethernet controller";
20111 +
20112 +static int autoneg_normal = 1;
20113 +
20114 +/* Some chipset needs special care. */
20115 +struct transceiver_ops transceivers[] = {
20116 +       {0x1018, broadcom_check_speed, broadcom_check_duplex},
20117 +       /* TDK 2120 and TDK 2120C */
20118 +       {0xC039, tdk_check_speed, tdk_check_duplex}, 
20119 +       {0x039C, tdk_check_speed, tdk_check_duplex},    
20120 +       /* Intel LXT972A*/
20121 +       {0x04de, intel_check_speed, intel_check_duplex},
20122 +       /* National Semiconductor DP83865 */
20123 +       {0x0017, national_check_speed, national_check_duplex},
20124 +       /* Generic, must be last. */
20125 +       {0x0000, generic_check_speed, generic_check_duplex}
20126 +};
20127 +
20128 +static struct net_device *crisv32_dev[2];
20129 +static struct crisv32_eth_leds *crisv32_leds[3];
20130 +
20131 +#ifdef CONFIG_CPU_FREQ
20132 +static int
20133 +crisv32_ethernet_freq_notifier(struct notifier_block *nb, unsigned long val,
20134 +                              void *data);
20135 +
20136 +static struct notifier_block crisv32_ethernet_freq_notifier_block = {
20137 +       .notifier_call  = crisv32_ethernet_freq_notifier
20138 +};
20139 +#endif
20140 +
20141 +/*
20142 + * mask in and out tx/rx interrupts.
20143 + */
20144 +static inline void crisv32_disable_tx_ints(struct crisv32_ethernet_local *np)
20145 +{
20146 +       reg_dma_rw_intr_mask intr_mask_tx = { .data = regk_dma_no };
20147 +       REG_WR(dma, np->dma_out_inst, rw_intr_mask, intr_mask_tx);
20148 +}
20149 +
20150 +static inline void crisv32_enable_tx_ints(struct crisv32_ethernet_local *np)
20151 +{
20152 +       reg_dma_rw_intr_mask intr_mask_tx = { .data = regk_dma_yes };
20153 +       REG_WR(dma, np->dma_out_inst, rw_intr_mask, intr_mask_tx);
20154 +}
20155 +
20156 +static inline void crisv32_disable_rx_ints(struct crisv32_ethernet_local *np)
20157 +{
20158 +       reg_dma_rw_intr_mask intr_mask_rx = { .in_eop = regk_dma_no };
20159 +       REG_WR(dma, np->dma_in_inst, rw_intr_mask, intr_mask_rx);
20160 +}
20161 +
20162 +static inline void crisv32_enable_rx_ints(struct crisv32_ethernet_local *np)
20163 +{
20164 +       reg_dma_rw_intr_mask intr_mask_rx = { .in_eop = regk_dma_yes };
20165 +       REG_WR(dma, np->dma_in_inst, rw_intr_mask, intr_mask_rx);
20166 +}
20167 +
20168 +/* start/stop receiver */
20169 +static inline void crisv32_start_receiver(struct crisv32_ethernet_local *np)
20170 +{
20171 +       reg_eth_rw_rec_ctrl rec_ctrl;
20172 +       
20173 +       rec_ctrl = REG_RD(eth, np->eth_inst, rw_rec_ctrl);
20174 +       rec_ctrl.ma0 = regk_eth_yes;
20175 +       rec_ctrl.broadcast = regk_eth_rec;
20176 +       REG_WR(eth, np->eth_inst, rw_rec_ctrl, rec_ctrl);
20177 +}
20178 +
20179 +static inline void crisv32_stop_receiver(struct crisv32_ethernet_local *np)
20180 +{
20181 +       reg_eth_rw_rec_ctrl rec_ctrl;
20182 +
20183 +       rec_ctrl = REG_RD(eth, np->eth_inst, rw_rec_ctrl);
20184 +       rec_ctrl.ma0 = regk_eth_no;
20185 +       rec_ctrl.broadcast = regk_eth_discard;
20186 +       REG_WR(eth, np->eth_inst, rw_rec_ctrl, rec_ctrl);
20187 +}
20188 +
20189 +static int __init
20190 +crisv32_eth_request_irqdma(struct net_device *dev)
20191 +{
20192 +       struct crisv32_ethernet_local *np = netdev_priv(dev);
20193 +       
20194 +       /* Allocate IRQs and DMAs. */
20195 +       if (np->eth_inst == regi_eth0) {
20196 +               if (request_irq(DMA0_INTR_VECT, crisv32tx_eth_interrupt, 
20197 +                               0, cardname, dev)) {
20198 +                       return -EAGAIN;
20199 +               }
20200 +
20201 +               if (request_irq(DMA1_INTR_VECT, crisv32rx_eth_interrupt,
20202 +                               IRQF_SAMPLE_RANDOM, cardname, dev)) {
20203 +                       goto err0_1; 
20204 +               }
20205 +               
20206 +               if (crisv32_request_dma(0, cardname, DMA_VERBOSE_ON_ERROR,
20207 +                                       12500000, dma_eth0))
20208 +                       goto err0_2;
20209 +               
20210 +               if (crisv32_request_dma(1, cardname, DMA_VERBOSE_ON_ERROR,
20211 +                                       12500000, dma_eth0))
20212 +                       goto err0_3;
20213 +               
20214 +               if (request_irq(ETH0_INTR_VECT, crisv32nw_eth_interrupt, 0,
20215 +                               cardname, dev)) {
20216 +                       crisv32_free_dma(1);
20217 +                 err0_3:
20218 +                       crisv32_free_dma(0);
20219 +                 err0_2:
20220 +                       free_irq(DMA1_INTR_VECT, dev);
20221 +                 err0_1:
20222 +                       free_irq(DMA0_INTR_VECT, dev);
20223 +                       return -EAGAIN;
20224 +               }
20225 +       } else {
20226 +               if (request_irq(DMA6_INTR_VECT, crisv32tx_eth_interrupt,
20227 +                               0, cardname, dev))
20228 +                       return -EAGAIN;
20229 +               
20230 +               if (request_irq(DMA7_INTR_VECT, crisv32rx_eth_interrupt,
20231 +                               IRQF_SAMPLE_RANDOM, cardname, dev))
20232 +                       goto err1_1;
20233 +
20234 +               if (crisv32_request_dma(6, cardname, DMA_VERBOSE_ON_ERROR,
20235 +                                       0, dma_eth1))
20236 +                       goto err1_2;
20237 +
20238 +               if (crisv32_request_dma(7, cardname, DMA_VERBOSE_ON_ERROR,
20239 +                                       0, dma_eth1))
20240 +                       goto err1_3;
20241 +
20242 +               if (request_irq(ETH1_INTR_VECT, crisv32nw_eth_interrupt, 0,
20243 +                               cardname, dev)) {
20244 +                       crisv32_free_dma(7);
20245 +                 err1_3:
20246 +                       crisv32_free_dma(6);
20247 +                 err1_2:
20248 +                       free_irq(DMA7_INTR_VECT, dev);
20249 +                 err1_1:
20250 +                       free_irq(DMA6_INTR_VECT, dev);
20251 +                       return -EAGAIN;
20252 +               }
20253 +       }
20254 +       return 0;
20255 +}
20256 +
20257 +static void __init
20258 +crisv32_eth_setup_controller(struct net_device *dev)
20259 +{
20260 +       struct crisv32_ethernet_local *np = netdev_priv(dev);
20261 +       
20262 +       reg_config_rw_pad_ctrl pad_ctrl;
20263 +       
20264 +       reg_eth_rw_tr_ctrl tr_ctrl = {
20265 +               .retry = regk_eth_yes,
20266 +               .pad = regk_eth_yes,
20267 +               .crc = regk_eth_yes
20268 +       };
20269 +       
20270 +       reg_eth_rw_rec_ctrl rec_ctrl = {
20271 +               .ma0 = regk_eth_no,       /* enable at open() */
20272 +               .broadcast = regk_eth_no,
20273 +               .max_size = regk_eth_size1522
20274 +       };
20275 +               
20276 +       reg_eth_rw_ga_lo ga_lo = { 0 };
20277 +       reg_eth_rw_ga_hi ga_hi = { 0 };
20278 +
20279 +       reg_eth_rw_gen_ctrl gen_ctrl = {
20280 +         .phy = regk_eth_mii_clk,
20281 +         .flow_ctrl = regk_eth_yes
20282 +       };
20283 +
20284 +       /* 
20285 +        * Initialize group address registers to make sure that no
20286 +        * unwanted addresses are matched.
20287 +        */
20288 +       REG_WR(eth, np->eth_inst, rw_ga_lo, ga_lo);
20289 +       REG_WR(eth, np->eth_inst, rw_ga_hi, ga_hi);
20290 +       
20291 +       /* Configure receiver and transmitter */
20292 +       REG_WR(eth, np->eth_inst, rw_rec_ctrl, rec_ctrl);
20293 +       REG_WR(eth, np->eth_inst, rw_tr_ctrl, tr_ctrl);
20294 +
20295 +       /* Enable ethernet controller with mii clk. */
20296 +       REG_WR(eth, np->eth_inst, rw_gen_ctrl, gen_ctrl);
20297 +       gen_ctrl.en = regk_eth_yes;
20298 +       REG_WR(eth, np->eth_inst, rw_gen_ctrl, gen_ctrl);
20299 +       
20300 +       /* keep reset low (RESET_LEN) */
20301 +       udelay(500);
20302 +
20303 +       /* done */
20304 +       pad_ctrl = REG_RD(config, regi_config, rw_pad_ctrl);
20305 +       pad_ctrl.phyrst_n = 1;
20306 +       REG_WR(config, regi_config, rw_pad_ctrl, pad_ctrl);
20307 +
20308 +       /* Let the PHY reset (RESET_WAIT) */
20309 +       udelay(200);
20310 +               
20311 +       crisv32_eth_probe_transceiver(dev);
20312 +}
20313 +
20314 +static void __init
20315 +crisv32_eth_init_rings(struct net_device *dev)
20316 +{
20317 +       struct crisv32_ethernet_local *np = netdev_priv(dev);
20318 +       int i;
20319 +       
20320 +       /* Initialise receive descriptors for interface. */
20321 +       for (i = 0; i < NBR_RX_DESC; i++) {
20322 +               struct sk_buff *skb = dev_alloc_skb(MAX_MEDIA_DATA_SIZE);
20323 +
20324 +               np->dma_rx_descr_list[i].skb = skb;
20325 +               np->dma_rx_descr_list[i].descr.buf =
20326 +                       (char*)virt_to_phys(skb->data);
20327 +               np->dma_rx_descr_list[i].descr.after =
20328 +                   (char*)virt_to_phys(skb->data + MAX_MEDIA_DATA_SIZE);
20329 +
20330 +               np->dma_rx_descr_list[i].descr.eol = 0;
20331 +               np->dma_rx_descr_list[i].descr.in_eop = 0;
20332 +               np->dma_rx_descr_list[i].descr.next =
20333 +                   (void *) virt_to_phys(&np->dma_rx_descr_list[i + 1].descr);
20334 +       }
20335 +       /* bend the list into a ring */ 
20336 +       np->dma_rx_descr_list[NBR_RX_DESC - 1].descr.next =
20337 +               (void *) virt_to_phys(&np->dma_rx_descr_list[0].descr);
20338 +       
20339 +       /* Initialize transmit descriptors. */
20340 +       for (i = 0; i < NBR_TX_DESC; i++) {
20341 +               np->dma_tx_descr_list[i].descr.wait = 1;
20342 +               np->dma_tx_descr_list[i].descr.eol = 0;
20343 +               np->dma_tx_descr_list[i].descr.out_eop = 0;
20344 +               np->dma_tx_descr_list[i].descr.next =
20345 +                       (void*)virt_to_phys(&np->dma_tx_descr_list[i+1].descr);
20346 +       }
20347 +       /* bend the list into a ring */
20348 +       np->dma_tx_descr_list[NBR_TX_DESC - 1].descr.next =
20349 +               (void *) virt_to_phys(&np->dma_tx_descr_list[0].descr);
20350 +
20351 +       crisv32_eth_reset_rings(dev);
20352 +}
20353 +
20354 +static void
20355 +crisv32_eth_reset_rings(struct net_device *dev)
20356 +{
20357 +       struct crisv32_ethernet_local *np = netdev_priv(dev);
20358 +       int i;
20359 +
20360 +       /* free un-handled tx packets */       
20361 +       while(np->txpackets
20362 +             || np->catch_tx_desc != np->active_tx_desc) {
20363 +               np->txpackets--;
20364 +               if (np->catch_tx_desc->skb)
20365 +                       dev_kfree_skb(np->catch_tx_desc->skb);
20366 +               
20367 +               np->catch_tx_desc->skb = 0;
20368 +               np->catch_tx_desc =
20369 +                       phys_to_virt((int)np->catch_tx_desc->descr.next);
20370 +       } while (np->catch_tx_desc != np->active_tx_desc);
20371 +       WARN_ON(np->txpackets != 0);
20372 +       np->txpackets = 0;
20373 +
20374 +       /* cleanup the rx-ring */
20375 +       for (i = 0; i < NBR_RX_DESC; i++) {
20376 +               struct sk_buff *skb;
20377 +               skb = np->dma_rx_descr_list[i].skb;
20378 +               if (!skb
20379 +                   || (np->dma_rx_descr_list[i].descr.buf !=
20380 +                       (void *)virt_to_phys(skb->data)))
20381 +               {                      
20382 +                       printk("%s:%d: damaged rx-ring! "
20383 +                              "i=%d skb=%p %lx %lx %p %p\n",
20384 +                              __func__, __LINE__, i,
20385 +                              skb,
20386 +                              virt_to_phys(skb->data),
20387 +                              virt_to_phys(skb->data + MAX_MEDIA_DATA_SIZE),
20388 +                              np->dma_rx_descr_list[i].descr.buf,
20389 +                              np->dma_rx_descr_list[i].descr.after);
20390 +                       WARN_ON(1);
20391 +                       crisv32_ethernet_bug(dev);                      
20392 +                       if (skb)
20393 +                               dev_kfree_skb(skb);
20394 +                       skb = dev_alloc_skb(MAX_MEDIA_DATA_SIZE);
20395 +                       np->dma_rx_descr_list[i].skb = skb;
20396 +                       np->dma_rx_descr_list[i].descr.buf =
20397 +                               (char*)virt_to_phys(skb->data);
20398 +               }
20399 +               np->dma_rx_descr_list[i].descr.after =
20400 +                       (char*)virt_to_phys(skb->data
20401 +                                           + MAX_MEDIA_DATA_SIZE);
20402 +               np->dma_rx_descr_list[i].descr.eol = 0;
20403 +               np->dma_rx_descr_list[i].descr.in_eop = 0;
20404 +               /* Workaround cache bug */
20405 +               flush_dma_descr(&np->dma_rx_descr_list[i].descr, 1); 
20406 +       }
20407 +       
20408 +       /* reset rx-ring */
20409 +       np->active_rx_desc = &np->dma_rx_descr_list[0];
20410 +       np->prev_rx_desc = &np->dma_rx_descr_list[NBR_RX_DESC - 1];
20411 +       np->last_rx_desc = np->prev_rx_desc;
20412 +       np->dma_rx_descr_list[NBR_RX_DESC - 1].descr.eol = 1;
20413 +       
20414 +       /* reset tx-ring */
20415 +       np->dma_tx_descr_list[0].descr.buf =
20416 +               np->dma_tx_descr_list[0].descr.after = 0;
20417 +       np->dma_rx_descr_list[i].descr.in_eop = 0;
20418 +       np->dma_tx_descr_list[0].descr.eol = 1;
20419 +       
20420 +       np->active_tx_desc = &np->dma_tx_descr_list[0];
20421 +       np->prev_tx_desc = &np->dma_tx_descr_list[NBR_TX_DESC - 1];
20422 +       np->catch_tx_desc = &np->dma_tx_descr_list[0];
20423 +       
20424 +       /* Fill context descriptors. */
20425 +       np->ctxt_in.next = 0;
20426 +       np->ctxt_in.saved_data =
20427 +               (void *)virt_to_phys(&np->active_rx_desc->descr);
20428 +       np->ctxt_in.saved_data_buf = np->active_rx_desc->descr.buf;
20429 +               
20430 +       np->ctxt_out.next = 0;
20431 +       np->ctxt_out.saved_data =
20432 +               (void *)virt_to_phys(&np->dma_tx_descr_list[0].descr);
20433 +}
20434 +
20435 +static void __init
20436 +crisv32_init_leds(int ledgrp, struct net_device* dev)
20437 +{
20438 +       struct timer_list timer_init = TIMER_INITIALIZER(NULL, 0, 0);
20439 +       struct crisv32_ethernet_local *np = netdev_priv(dev);
20440 +
20441 +       /* Use already allocated led grp if initialized */
20442 +       if (crisv32_leds[ledgrp] != NULL) {
20443 +               np->leds = crisv32_leds[ledgrp];
20444 +               return;
20445 +       }
20446 +
20447 +       crisv32_leds[ledgrp] = kmalloc(sizeof(struct crisv32_eth_leds),GFP_KERNEL);
20448 +
20449 +       crisv32_leds[ledgrp]->ledgrp = ledgrp;
20450 +       crisv32_leds[ledgrp]->led_active = 0;
20451 +       /* NOTE: Should this value be set to zero as the jiffies timer can wrap? */
20452 +       crisv32_leds[ledgrp]->led_next_time = jiffies;
20453 +
20454 +       crisv32_leds[ledgrp]->clear_led_timer = timer_init;
20455 +       crisv32_leds[ledgrp]->clear_led_timer.function = crisv32_clear_network_leds;
20456 +       crisv32_leds[ledgrp]->clear_led_timer.data = (unsigned long) dev;
20457 +
20458 +       spin_lock_init(&crisv32_leds[ledgrp]->led_lock);
20459 +
20460 +       np->leds = crisv32_leds[ledgrp];
20461 +}
20462 +
20463 +static int __init
20464 +crisv32_ethernet_init(void)
20465 +{
20466 +       struct crisv32_ethernet_local *np;
20467 +       int ret = 0;
20468 +
20469 +       printk("ETRAX FS 10/100MBit ethernet v0.01 (c)"
20470 +              " 2003 Axis Communications AB\n");
20471 +
20472 +#ifdef CONFIG_ETRAX_ETHERNET_IFACE0
20473 +{
20474 +       int iface0 = 0;
20475 +       /* Default MAC address for interface 0.
20476 +        * The real one will be set later. */
20477 +       static struct sockaddr default_mac_iface0 = 
20478 +               {0, {0x00, 0x40, 0x8C, 0xCD, 0x00, 0x00}};
20479 +       
20480 +       if (!(crisv32_dev[iface0] = alloc_etherdev(sizeof *np)))
20481 +               return -ENOMEM;
20482 +       
20483 +       ret |= crisv32_ethernet_device_init(crisv32_dev[iface0]);
20484 +       
20485 +#if defined(CONFIG_ETRAX_ETH0_USE_LEDGRP0)
20486 +       crisv32_init_leds(LED_GRP_0,crisv32_dev[iface0]);
20487 +#elif defined(CONFIG_ETRAX_ETH0_USE_LEDGRP1)
20488 +       crisv32_init_leds(LED_GRP_1,crisv32_dev[iface0]);
20489 +#else
20490 +       crisv32_init_leds(LED_GRP_NONE,crisv32_dev[iface0]);
20491 +#endif
20492 +
20493 +       np = (struct crisv32_ethernet_local *) crisv32_dev[iface0]->priv;
20494 +       np->eth_inst = regi_eth0;
20495 +       np->dma_out_inst = regi_dma0;
20496 +       np->dma_in_inst = regi_dma1;
20497 +       
20498 +       register_netdev(crisv32_dev[iface0]);
20499 +       
20500 +       /* Set up default MAC address */
20501 +       memcpy(crisv32_dev[iface0]->dev_addr, default_mac_iface0.sa_data, 6);
20502 +       crisv32_eth_set_mac_address(crisv32_dev[iface0], &default_mac_iface0);
20503 +       if (crisv32_eth_request_irqdma(crisv32_dev[iface0]))
20504 +               printk("%s: eth0 unable to allocate IRQ and DMA resources\n",
20505 +                      __func__);
20506 +       np->txpackets = 0;
20507 +       crisv32_eth_init_rings(crisv32_dev[iface0]);
20508 +       crisv32_eth_setup_controller(crisv32_dev[iface0]);
20509 +}
20510 +#endif /* CONFIG_ETRAX_ETHERNET_IFACE0 */
20511 +
20512 +#ifdef CONFIG_ETRAX_ETHERNET_IFACE1
20513 +{
20514 +       int iface1 = 0;
20515 +       /* Default MAC address for interface 1.
20516 +        * The real one will be set later. */
20517 +       static struct sockaddr default_mac_iface1 = 
20518 +               {0, {0x00, 0x40, 0x8C, 0xCD, 0x00, 0x01}};
20519 +
20520 +       if (crisv32_pinmux_alloc_fixed(pinmux_eth1))
20521 +               panic("Eth pinmux\n");
20522 +
20523 +       /* Increase index to device array if interface 0 is enabled as well.*/
20524 +#ifdef CONFIG_ETRAX_ETHERNET_IFACE0
20525 +       iface1++;
20526 +#endif
20527 +       if (!(crisv32_dev[iface1] = alloc_etherdev(sizeof *np)))
20528 +               return -ENOMEM;
20529 +       
20530 +       ret |= crisv32_ethernet_device_init(crisv32_dev[iface1]);
20531 +
20532 +#if defined(CONFIG_ETRAX_ETH1_USE_LEDGRP0)
20533 +       crisv32_init_leds(LED_GRP_0,crisv32_dev[iface1]);
20534 +#elif defined(CONFIG_ETRAX_ETH1_USE_LEDGRP1)
20535 +       crisv32_init_leds(LED_GRP_1,crisv32_dev[iface1]);
20536 +#else
20537 +       crisv32_init_leds(LED_GRP_NONE,crisv32_dev[iface1]);
20538 +#endif
20539 +
20540 +       np = (struct crisv32_ethernet_local *) crisv32_dev[iface1]->priv;
20541 +       np->eth_inst = regi_eth1;
20542 +       np->dma_out_inst = regi_dma6;
20543 +       np->dma_in_inst = regi_dma7;
20544 +       
20545 +       register_netdev(crisv32_dev[iface1]);
20546 +
20547 +       /* Set up default MAC address */
20548 +       memcpy(crisv32_dev[iface1]->dev_addr, default_mac_iface1.sa_data, 6);
20549 +       crisv32_eth_set_mac_address(crisv32_dev[iface1], &default_mac_iface1);
20550 +       
20551 +       if (crisv32_eth_request_irqdma(crisv32_dev[iface1]))
20552 +               printk("%s: eth1 unable to allocate IRQ and DMA resources\n",
20553 +                      __func__);
20554 +       np->txpackets = 0;
20555 +       crisv32_eth_init_rings(crisv32_dev[iface1]);
20556 +       crisv32_eth_setup_controller(crisv32_dev[iface1]);
20557 +}
20558 +#endif /* CONFIG_ETRAX_ETHERNET_IFACE1 */
20559 +
20560 +#ifdef CONFIG_CPU_FREQ
20561 +       cpufreq_register_notifier(&crisv32_ethernet_freq_notifier_block,
20562 +                                 CPUFREQ_TRANSITION_NOTIFIER);
20563 +#endif
20564 +
20565 +       return ret;
20566 +}
20567 +
20568 +static int __init
20569 +crisv32_ethernet_device_init(struct net_device* dev)
20570 +{
20571 +       struct timer_list timer_init = TIMER_INITIALIZER(NULL, 0, 0);
20572 +       struct crisv32_ethernet_local *np;
20573 +
20574 +       dev->base_addr = 0;     /* Just to have something to show. */
20575 +
20576 +       /* we do our own locking */
20577 +       dev->features |= NETIF_F_LLTX;
20578 +       
20579 +       /* We use several IRQs and DMAs so just report 0 here. */
20580 +       dev->irq = 0;
20581 +       dev->dma = 0;
20582 +
20583 +       /* 
20584 +        * Fill in our handlers so the network layer can talk to us in the
20585 +        * future. 
20586 +        */
20587 +       dev->open = crisv32_eth_open;
20588 +       dev->hard_start_xmit = crisv32_eth_send_packet;
20589 +       dev->stop = crisv32_eth_close;
20590 +       dev->get_stats = crisv32_get_stats;
20591 +       dev->set_multicast_list = crisv32_eth_set_multicast_list;
20592 +       dev->set_mac_address = crisv32_eth_set_mac_address;
20593 +       dev->ethtool_ops = &crisv32_ethtool_ops;
20594 +       dev->do_ioctl = crisv32_eth_ioctl;
20595 +       dev->set_config = crisv32_eth_set_config;
20596 +       dev->tx_timeout = crisv32_eth_tx_timeout;
20597 +#ifdef CONFIG_NET_POLL_CONTROLLER
20598 +       dev->poll_controller = crisv32_netpoll;
20599 +#endif
20600 +       
20601 +       np = netdev_priv(dev);
20602 +
20603 +       spin_lock_init(&np->lock);
20604 +       spin_lock_init(&np->transceiver_lock);
20605 +       
20606 +       /* Initialize speed indicator stuff. */
20607 +       np->current_speed = 10;
20608 +       np->current_speed_selection = 0;        /* Auto. */
20609 +       np->speed_timer = timer_init;
20610 +       np->speed_timer.expires = jiffies + NET_LINK_UP_CHECK_INTERVAL;
20611 +       np->speed_timer.data = (unsigned long) dev;
20612 +       np->speed_timer.function = crisv32_eth_check_speed;
20613 +
20614 +       np->full_duplex = 0;
20615 +       np->current_duplex = autoneg;
20616 +       np->duplex_timer = timer_init;
20617 +       np->duplex_timer.expires = jiffies + NET_DUPLEX_CHECK_INTERVAL;
20618 +       np->duplex_timer.data = (unsigned long) dev;
20619 +       np->duplex_timer.function = crisv32_eth_check_duplex;
20620 +       
20621 +       return 0;
20622 +}
20623 +
20624 +static int
20625 +crisv32_eth_open(struct net_device *dev)
20626 +{
20627 +       struct sockaddr mac_addr;
20628 +       reg_dma_rw_ack_intr ack_intr = { .data = 1,.in_eop = 1 };
20629 +       reg_dma_rw_cfg dma_cfg = { .en = 1 };
20630 +       reg_eth_rw_clr_err clr_err = {.clr = regk_eth_yes};
20631 +       int intr_mask_nw = 0x1cff;
20632 +       int eth_ack_intr = 0xffff;
20633 +       struct crisv32_ethernet_local *np = netdev_priv(dev);
20634 +
20635 +       spin_lock(&np->lock);
20636 +#ifdef CONFIG_CRIS_MACH_ARTPEC3
20637 +       np->gigabit_mode = 0;
20638 +#endif
20639 +       crisv32_disable_tx_ints(np);
20640 +       crisv32_disable_rx_ints(np);
20641 +       
20642 +       REG_WR(eth, np->eth_inst, rw_clr_err, clr_err);
20643 +       REG_WR_INT(eth, np->eth_inst, rw_ack_intr, eth_ack_intr);
20644 +       REG_WR_INT(eth, np->eth_inst, rw_intr_mask, intr_mask_nw);
20645 +       crisv32_eth_reset_rings(dev);
20646 +       
20647 +       /* Give the hardware an idea of what MAC address we want. */
20648 +       memcpy(mac_addr.sa_data, dev->dev_addr, dev->addr_len);
20649 +       crisv32_eth_set_mac_address(dev, &mac_addr);
20650 +
20651 +       /* Enable irq and make sure that the irqs are cleared. */
20652 +       REG_WR(dma, np->dma_out_inst, rw_ack_intr, ack_intr);
20653 +       REG_WR(dma, np->dma_in_inst, rw_ack_intr, ack_intr);
20654 +       
20655 +       /* Prepare input DMA. */
20656 +       DMA_RESET(np->dma_in_inst);
20657 +       DMA_ENABLE(np->dma_in_inst);
20658 +#ifdef CONFIG_CRIS_MACH_ARTPEC3
20659 +       DMA_WR_CMD(np->dma_in_inst, regk_dma_set_w_size2);
20660 +#endif
20661 +       DMA_START_CONTEXT( np->dma_in_inst, virt_to_phys(&np->ctxt_in));
20662 +       DMA_CONTINUE(np->dma_in_inst);
20663 +       crisv32_enable_rx_ints(np);
20664 +       crisv32_start_receiver(np);     
20665 +       
20666 +       /* Prepare output DMA. */
20667 +#ifdef CONFIG_CRIS_MACH_ARTPEC3
20668 +       DMA_WR_CMD(np->dma_out_inst, regk_dma_set_w_size4);
20669 +#endif
20670 +       REG_WR(dma, np->dma_out_inst, rw_cfg, dma_cfg);
20671 +       netif_start_queue(dev);
20672 +       crisv32_enable_tx_ints(np);
20673 +       
20674 +       /* Start duplex/speed timers */
20675 +       add_timer(&np->speed_timer);
20676 +       add_timer(&np->duplex_timer);   
20677 +
20678 +       spin_unlock(&np->lock);
20679 +       /* 
20680 +        * We are now ready to accept transmit requests from the queueing
20681 +        * layer of the networking.
20682 +        */
20683 +       netif_carrier_on(dev);
20684 +
20685 +       return 0;
20686 +}
20687 +
20688 +static int
20689 +crisv32_eth_close(struct net_device *dev)
20690 +{
20691 +       reg_dma_rw_ack_intr ack_intr = {0};
20692 +
20693 +       struct crisv32_ethernet_local *np = netdev_priv(dev);
20694 +       unsigned long flags;
20695 +
20696 +       printk(KERN_INFO "Closing %s.\n", dev->name);
20697 +
20698 +       /* stop the receiver before the DMA channels to avoid overruns. */
20699 +       crisv32_stop_receiver(np);
20700 +       
20701 +       spin_lock_irqsave(&np->lock, flags);
20702 +       netif_stop_queue(dev);
20703 +
20704 +       /* Reset the TX DMA in case it has hung on something. */
20705 +       DMA_RESET(np->dma_in_inst);
20706 +
20707 +       /* Stop DMA */
20708 +       DMA_STOP(np->dma_in_inst);
20709 +       DMA_STOP(np->dma_out_inst);
20710 +
20711 +       /* Disable irq and make sure that the irqs are cleared. */
20712 +       crisv32_disable_tx_ints(np);
20713 +       ack_intr.data = 1;
20714 +       REG_WR(dma, np->dma_out_inst, rw_ack_intr, ack_intr);
20715 +
20716 +       crisv32_disable_rx_ints(np);
20717 +       ack_intr.in_eop = 1;
20718 +       REG_WR(dma, np->dma_in_inst, rw_ack_intr, ack_intr);
20719 +
20720 +       np->sender_started = 0; 
20721 +       spin_unlock_irqrestore(&np->lock, flags);
20722 +
20723 +       /* Update the statistics. */
20724 +       update_rx_stats(np);
20725 +       update_tx_stats(np);
20726 +
20727 +       /* Stop speed/duplex timers */
20728 +       del_timer(&np->speed_timer);
20729 +       del_timer(&np->duplex_timer);
20730 +
20731 +       return 0;
20732 +}
20733 +
20734 +static int
20735 +crisv32_eth_set_mac_address(struct net_device *dev, void *vpntr)
20736 +{
20737 +       int i;
20738 +       unsigned char *addr = ((struct sockaddr*)vpntr)->sa_data;
20739 +       
20740 +       reg_eth_rw_ma0_lo ma0_lo =
20741 +         { addr[0] | (addr[1] << 8) | (addr[2] << 16) | (addr[3] << 24)};
20742 +       
20743 +       reg_eth_rw_ma0_hi ma0_hi = { addr[4] | (addr[5] << 8) };
20744 +
20745 +       struct crisv32_ethernet_local *np = netdev_priv(dev);
20746 +
20747 +       /* Remember the address. */
20748 +       memcpy(dev->dev_addr, addr, dev->addr_len);
20749 +
20750 +       /* 
20751 +        * Write the address to the hardware.
20752 +        * Note the way the address is wrapped:
20753 +        * ma0_l0 = a0_0 | (a0_1 << 8) | (a0_2 << 16) | (a0_3 << 24);
20754 +        * ma0_hi = a0_4 | (a0_5 << 8);
20755 +        */
20756 +       REG_WR(eth, np->eth_inst, rw_ma0_lo, ma0_lo);
20757 +       REG_WR(eth, np->eth_inst, rw_ma0_hi, ma0_hi);
20758 +
20759 +       printk(KERN_INFO "%s: changed MAC to ", dev->name);
20760 +
20761 +       for (i = 0; i < 5; i++)
20762 +               printk("%02X:", dev->dev_addr[i]);
20763 +
20764 +       printk("%02X\n", dev->dev_addr[i]);
20765 +
20766 +       return 0;
20767 +}
20768 +
20769 +static irqreturn_t
20770 +crisv32rx_eth_interrupt(int irq, void *dev_id)
20771 +{
20772 +       reg_dma_r_masked_intr masked_in;
20773 +       reg_dma_rw_cmd cmd = {0};
20774 +       reg_dma_rw_ack_intr ack_intr = {0};     
20775 +       struct net_device *dev = (struct net_device *) dev_id;
20776 +       struct crisv32_ethernet_local *np = netdev_priv(dev);
20777 +
20778 +       masked_in = REG_RD(dma, np->dma_in_inst, r_masked_intr);
20779 +       
20780 +       if (masked_in.in_eop) {
20781 +               DEBUG(printk("EOP_IN interrupt\n"));
20782 +
20783 +               /* Acknowledge input dma interrupt. */
20784 +               ack_intr.in_eop = 1;
20785 +               REG_WR(dma, np->dma_in_inst, rw_ack_intr, ack_intr);
20786 +
20787 +               np->new_rx_package = 1;
20788 +               /* Check if complete packets were indeed received. */
20789 +               while (np->active_rx_desc->descr.in_eop == 1
20790 +                      && np->new_rx_package) {
20791 +                       /*
20792 +                        * Take out the buffer and give it to the OS, then
20793 +                        * allocate a new buffer to put a packet in.
20794 +                        */
20795 +                       crisv32_eth_receive_packet(dev);
20796 +                       
20797 +                       /* Update number of packets received. */
20798 +                       np->stats.rx_packets++;
20799 +
20800 +                       /* Restarts input dma. */
20801 +                       cmd.cont_data = 1;
20802 +                       REG_WR(dma, np->dma_in_inst, rw_cmd, cmd);
20803 +
20804 +                       /* Acknowledge input dma interrupt. */
20805 +                       REG_WR(dma, np->dma_in_inst, rw_ack_intr, ack_intr);
20806 +               }
20807 +       }
20808 +       return IRQ_HANDLED;
20809 +}
20810 +
20811 +static irqreturn_t
20812 +crisv32tx_eth_interrupt(int irq, void *dev_id)
20813 +{
20814 +       reg_dma_rw_stat stat;
20815 +       dma_descr_data *dma_pos;
20816 +       reg_dma_rw_ack_intr ack_intr = { .data = 1 };
20817 +       reg_dma_r_masked_intr masked_out;
20818 +       
20819 +       struct net_device *dev = (struct net_device *) dev_id;
20820 +       struct crisv32_ethernet_local *np = netdev_priv(dev);
20821 +       unsigned long flags;
20822 +       
20823 +       masked_out = REG_RD(dma, np->dma_out_inst, r_masked_intr);
20824 +               
20825 +       /* Get the current output dma position. */
20826 +       stat = REG_RD(dma, np->dma_out_inst, rw_stat);
20827 +       if (stat.list_state == regk_dma_data_at_eol)
20828 +               dma_pos = &np->active_tx_desc->descr;
20829 +       else
20830 +               dma_pos = phys_to_virt(REG_RD_INT(dma, np->dma_out_inst,
20831 +                                                 rw_data));
20832 +       
20833 +       /* ack the interrupt */
20834 +       REG_WR(dma, np->dma_out_inst, rw_ack_intr, ack_intr);
20835 +       
20836 +       /* protect against ethernet excessive-col interrupts */
20837 +       spin_lock_irqsave(&np->lock, flags);
20838 +
20839 +       /* Take care of transmitted dma descriptors and report sent packet. */
20840 +       while (np->txpackets && ((&np->catch_tx_desc->descr != dma_pos)
20841 +                                || netif_queue_stopped(dev))) {
20842 +               /* Update sent packet statistics. */
20843 +               np->stats.tx_bytes += np->catch_tx_desc->skb->len;
20844 +               np->stats.tx_packets++;
20845 +               
20846 +               dev_kfree_skb_irq(np->catch_tx_desc->skb);
20847 +               np->catch_tx_desc->skb = 0;
20848 +               np->txpackets--;
20849 +               np->catch_tx_desc =
20850 +                       phys_to_virt((int)np->catch_tx_desc->descr.next);
20851 +#ifdef CONFIG_CRIS_MACH_ARTPEC3
20852 +               if (np->gigabit_mode) {
20853 +                 np->intmem_tx_buf_catch->free = 1;
20854 +                 np->intmem_tx_buf_catch = np->intmem_tx_buf_catch->next;
20855 +               }
20856 +#endif
20857 +               netif_wake_queue(dev);
20858 +       }
20859 +       spin_unlock_irqrestore(&np->lock, flags);
20860 +       return IRQ_HANDLED;
20861 +}
20862 +
20863 +
20864 +/* Update receive errors. */
20865 +static void
20866 +update_rx_stats(struct crisv32_ethernet_local *np)
20867 +{
20868 +       reg_eth_rs_rec_cnt r;
20869 +       reg_eth_rs_phy_cnt rp;
20870 +
20871 +       r = REG_RD(eth, np->eth_inst, rs_rec_cnt);
20872 +       rp = REG_RD(eth, np->eth_inst, rs_phy_cnt);
20873 +
20874 +       np->stats.rx_fifo_errors += r.congestion;
20875 +       np->stats.rx_crc_errors += r.crc_err;
20876 +       np->stats.rx_frame_errors += r.align_err;
20877 +       np->stats.rx_length_errors += r.oversize;
20878 +}
20879 +
20880 +/* Update transmit errors. */
20881 +static void
20882 +update_tx_stats(struct crisv32_ethernet_local *np)
20883 +{
20884 +       reg_eth_rs_tr_cnt r;
20885 +
20886 +       r = REG_RD(eth, np->eth_inst, rs_tr_cnt);
20887 +
20888 +       np->stats.collisions += r.single_col + r.mult_col;
20889 +       np->stats.tx_errors += r.deferred;
20890 +}
20891 +
20892 +/* Get current statistics. */
20893 +static struct net_device_stats *
20894 +crisv32_get_stats(struct net_device *dev)
20895 +{
20896 +       unsigned long flags;
20897 +       struct crisv32_ethernet_local *np = netdev_priv(dev);
20898 +       
20899 +       spin_lock_irqsave(&np->lock, flags);
20900 +
20901 +       update_rx_stats(np);
20902 +       update_tx_stats(np);
20903 +
20904 +       spin_unlock_irqrestore(&np->lock, flags);
20905 +
20906 +       return &np->stats;
20907 +}
20908 +
20909 +/* Check for network errors. This acknowledge the received interrupt. */
20910 +static irqreturn_t
20911 +crisv32nw_eth_interrupt(int irq, void *dev_id)
20912 +{
20913 +       struct net_device *dev = (struct net_device *) dev_id;
20914 +       struct crisv32_ethernet_local *np = netdev_priv(dev);
20915 +       reg_eth_r_masked_intr intr_mask;
20916 +       int ack_intr = 0xffff;
20917 +       reg_eth_rw_clr_err clr_err;
20918 +
20919 +       intr_mask = REG_RD(eth, np->eth_inst, r_masked_intr);
20920 +
20921 +       /*
20922 +        * Check for underrun and/or excessive collisions. Note that the
20923 +        * rw_clr_err register clears both underrun and excessive collision
20924 +        * errors, so there's no need to check them separately.
20925 +        */
20926 +       if (np->sender_started
20927 +           && (intr_mask.urun || intr_mask.exc_col)) {
20928 +               unsigned long flags;
20929 +               dma_descr_data *dma_pos;
20930 +               reg_dma_rw_stat stat;
20931 +
20932 +               /* Get the current output dma position. */
20933 +               stat = REG_RD(dma, np->dma_out_inst, rw_stat);
20934 +               if (stat.list_state == regk_dma_data_at_eol)
20935 +                       dma_pos = &np->active_tx_desc->descr;
20936 +               else
20937 +                       dma_pos = phys_to_virt(REG_RD_INT(dma,
20938 +                                                         np->dma_out_inst,
20939 +                                                         rw_data));
20940 +
20941 +               /*
20942 +                * Protect against the tx-interrupt messing with
20943 +                * the tx-ring.
20944 +                */
20945 +               spin_lock_irqsave(&np->lock, flags);
20946 +               /*
20947 +                * If we have more than one packet in the tx-ring
20948 +                * drop one and move ahead. Upper layers rely on
20949 +                * packet loss when doing congestion control.
20950 +                */
20951 +               if (intr_mask.exc_col && np->txpackets > 1) {
20952 +                       dev_kfree_skb_irq(np->catch_tx_desc->skb);
20953 +                       np->catch_tx_desc->skb = 0;
20954 +                       np->catch_tx_desc = 
20955 +                               phys_to_virt((int)
20956 +                                            np->catch_tx_desc->descr.next);
20957 +                       np->txpackets--;
20958 +                       netif_wake_queue(dev);
20959 +               }
20960 +               np->ctxt_out.next = 0;
20961 +               if (np->txpackets) {
20962 +                       np->ctxt_out.saved_data = (void *)
20963 +                               virt_to_phys(&np->catch_tx_desc->descr);
20964 +                       np->ctxt_out.saved_data_buf =
20965 +                               np->catch_tx_desc->descr.buf;
20966 +                       
20967 +                       /* restart the DMA */
20968 +                       DMA_START_CONTEXT(np->dma_out_inst,
20969 +                                         (int) virt_to_phys(&np->ctxt_out));
20970 +               }
20971 +               else {
20972 +                       /* let the next packet restart the DMA */
20973 +                       np->ctxt_out.saved_data = (void *)
20974 +                               virt_to_phys(&np->active_tx_desc->descr);
20975 +                       np->sender_started = 0;
20976 +               }
20977 +               
20978 +               spin_unlock_irqrestore(&np->lock, flags);
20979 +               np->stats.tx_errors++;
20980 +       }
20981 +
20982 +       REG_WR_INT(eth, np->eth_inst, rw_ack_intr, ack_intr);
20983 +       clr_err.clr = 1;
20984 +       REG_WR(eth, np->eth_inst, rw_clr_err, clr_err);
20985 +       
20986 +       update_rx_stats(np);
20987 +       update_tx_stats(np);
20988 +       
20989 +       return IRQ_HANDLED;
20990 +}
20991 +
20992 +/* We have a good packet(s), get it/them out of the buffers. */
20993 +static void
20994 +crisv32_eth_receive_packet(struct net_device *dev)
20995 +{
20996 +       int length;
20997 +       struct sk_buff *skb;
20998 +       struct crisv32_ethernet_local *np = netdev_priv(dev);
20999 +       struct sk_buff *tmp;
21000 +       unsigned long flags;
21001 +
21002 +       DEBUG(printk("crisv32_receive_packet\n"));
21003 +
21004 +       /* Activate LED */
21005 +       spin_lock_irqsave(&np->leds->led_lock, flags);
21006 +       if (!np->leds->led_active && time_after(jiffies, np->leds->led_next_time)) {
21007 +               /* light the network leds depending on the current speed. */
21008 +               crisv32_set_network_leds(LED_ACTIVITY, dev);
21009 +
21010 +               /* Set the earliest time we may clear the LED */
21011 +               np->leds->led_next_time = jiffies + NET_FLASH_TIME;
21012 +               np->leds->led_active = 1;
21013 +               np->leds->clear_led_timer.data = (unsigned long) dev;
21014 +               mod_timer(&np->leds->clear_led_timer, jiffies + HZ/10);
21015 +       }
21016 +       spin_unlock_irqrestore(&np->leds->led_lock, flags);
21017 +
21018 +       /* Discard CRC (4 bytes). */
21019 +       length = (np->active_rx_desc->descr.after) - 
21020 +               (np->active_rx_desc->descr.buf) - 4;
21021 +
21022 +       /* Update received packet statistics. */
21023 +       np->stats.rx_bytes += length;
21024 +
21025 +       if (np->active_rx_desc != np->last_rx_desc) {
21026 +#ifdef CONFIG_CRIS_MACH_ARTPEC3
21027 +               if (np->gigabit_mode) {
21028 +                       skb = dev_alloc_skb(length);
21029 +                       if(!skb) {
21030 +                               np->stats.rx_errors++;
21031 +                               printk(KERN_NOTICE "%s: memory squeeze,"
21032 +                                      " dropping packet.", dev->name);
21033 +                               return;
21034 +                       }
21035 +                       /* Allocate room for the packet body. */
21036 +                       skb_put(skb, length - ETHER_HEAD_LEN);
21037 +                       /* Allocate room for the header and copy the data to
21038 +                        * the SKB */
21039 +                       memcpy(skb_push(skb, ETHER_HEAD_LEN),
21040 +                              crisv32_intmem_phys_to_virt((unsigned long)np->active_rx_desc->descr.buf), length);
21041 +                       skb->dev = dev;
21042 +                       skb->protocol = eth_type_trans(skb, dev);
21043 +                       skb->ip_summed = CHECKSUM_NONE;
21044 +                       /* Send the packet to the upper layer. */
21045 +                       netif_rx(skb);
21046 +                       np->last_rx_desc =
21047 +                               (void *) phys_to_virt(np->last_rx_desc->descr.next);
21048 +               } else {
21049 +#endif
21050 +                       tmp = dev_alloc_skb(MAX_MEDIA_DATA_SIZE);
21051 +                       if (!tmp) {
21052 +                               np->stats.rx_errors++;
21053 +                               printk(KERN_NOTICE "%s: memory squeeze,"
21054 +                                      " dropping packet.",
21055 +                                      dev->name);
21056 +                               return;
21057 +                       }
21058 +                       skb = np->active_rx_desc->skb;
21059 +                       np->active_rx_desc->skb = tmp;
21060 +                       skb_put(skb, length);
21061 +
21062 +                       np->active_rx_desc->descr.buf =
21063 +                               (void *) virt_to_phys(np->active_rx_desc->skb->data);
21064 +                       np->active_rx_desc->descr.after =
21065 +                               np->active_rx_desc->descr.buf + MAX_MEDIA_DATA_SIZE;
21066 +
21067 +                       skb->dev = dev;
21068 +                       skb->protocol = eth_type_trans(skb, dev);
21069 +                       skb->ip_summed = CHECKSUM_NONE;
21070 +
21071 +                       /* Send the packet to the upper layer. */
21072 +                       netif_rx(skb);
21073 +                       np->last_rx_desc =
21074 +                               phys_to_virt((int)
21075 +                                            np->last_rx_desc->descr.next);
21076 +               }
21077 +#ifdef CONFIG_CRIS_MACH_ARTPEC3
21078 +       }
21079 +#endif
21080 +       /*
21081 +        * When the input DMA reaches eol precaution must be taken, otherwise
21082 +        * the DMA could stop. The problem occurs if the eol flag is re-placed
21083 +        * on the descriptor that the DMA stands on before the DMA proceed to
21084 +        * the next descriptor. This case could, for example, happen if there
21085 +        * is a traffic burst and then the network goes silent. To prevent this
21086 +        * we make sure that we do not set the eol flag on the descriptor that
21087 +        * the DMA stands on.
21088 +        */
21089 +       if(virt_to_phys(&np->active_rx_desc->descr) !=
21090 +          REG_RD_INT(dma, np->dma_in_inst, rw_saved_data)) {
21091 +               np->active_rx_desc->descr.after =
21092 +                       np->active_rx_desc->descr.buf + MAX_MEDIA_DATA_SIZE;
21093 +               np->active_rx_desc->descr.eol = 1;
21094 +               np->active_rx_desc->descr.in_eop = 0;
21095 +               np->active_rx_desc =
21096 +                       phys_to_virt((int)np->active_rx_desc->descr.next);
21097 +               barrier();
21098 +               np->prev_rx_desc->descr.eol = 0;
21099 +               flush_dma_descr(&np->prev_rx_desc->descr, 0); // Workaround cache bug
21100 +               np->prev_rx_desc =
21101 +                       phys_to_virt((int)np->prev_rx_desc->descr.next);
21102 +               flush_dma_descr(&np->prev_rx_desc->descr, 1); // Workaround cache bug
21103 +       } else {
21104 +               np->new_rx_package = 0;
21105 +       }
21106 +}
21107 +
21108 +/* 
21109 + * This function (i.e. hard_start_xmit) is protected from concurrent calls by a
21110 + * spinlock (xmit_lock) in the net_device structure.
21111 + */
21112 +static int
21113 +crisv32_eth_send_packet(struct sk_buff *skb, struct net_device *dev)
21114 +{
21115 +       struct crisv32_ethernet_local *np = netdev_priv(dev);
21116 +       unsigned char *buf = skb->data;
21117 +       unsigned long flags;
21118 +
21119 +       dev->trans_start = jiffies;
21120 +       
21121 +       spin_lock_irqsave(&np->leds->led_lock, flags);
21122 +       if (!np->leds->led_active && time_after(jiffies, np->leds->led_next_time)) {
21123 +               /* light the network leds depending on the current speed. */
21124 +               crisv32_set_network_leds(LED_ACTIVITY, dev);
21125 +
21126 +               /* Set the earliest time we may clear the LED */
21127 +               np->leds->led_next_time = jiffies + NET_FLASH_TIME;
21128 +               np->leds->led_active = 1;
21129 +               np->leds->clear_led_timer.data = (unsigned long) dev;
21130 +               mod_timer(&np->leds->clear_led_timer, jiffies + HZ/10);
21131 +       }
21132 +       spin_unlock_irqrestore(&np->leds->led_lock, flags);
21133 +
21134 +       /*
21135 +        * Need to disable irq to avoid updating pointer in interrupt while
21136 +        * sending packets.
21137 +        */
21138 +       spin_lock_irqsave(&np->lock, flags);
21139 +       
21140 +       np->active_tx_desc->skb = skb;
21141 +#ifdef CONFIG_CRIS_MACH_ARTPEC3
21142 +       if (np->gigabit_mode) {
21143 +               if(np->intmem_tx_buf_active->free) {
21144 +                       memcpy(np->intmem_tx_buf_active->buf,
21145 +                              skb->data, skb->len);
21146 +                       np->intmem_tx_buf_active->free = 0;
21147 +                       crisv32_eth_hw_send_packet(
21148 +                               np->intmem_tx_buf_active->buf, skb->len, np);
21149 +                       np->intmem_tx_buf_active =
21150 +                               np->intmem_tx_buf_active->next;
21151 +               } else {
21152 +                       printk("%s: Internal tx memory buffer not free!\n\r",
21153 +                              __FILE__);
21154 +                       spin_unlock_irqrestore(&np->lock, flags);
21155 +                       return 1;
21156 +               }
21157 +       }
21158 +       else 
21159 +#endif
21160 +       {
21161 +               crisv32_eth_hw_send_packet(buf, skb->len, np);
21162 +       }
21163 +       /* Stop queue if full. */
21164 +       if (np->active_tx_desc == np->catch_tx_desc)
21165 +               netif_stop_queue(dev);
21166 +       
21167 +       np->txpackets++;
21168 +       spin_unlock_irqrestore(&np->lock, flags);
21169 +       
21170 +       return 0;
21171 +}
21172 +
21173 +
21174 +static void
21175 +crisv32_eth_hw_send_packet(unsigned char *buf, int length, void *priv)
21176 +{
21177 +       struct crisv32_ethernet_local *np =
21178 +               (struct crisv32_ethernet_local *) priv;
21179 +
21180 +       /* Configure the tx dma descriptor. */
21181 +#ifdef CONFIG_CRIS_MACH_ARTPEC3
21182 +       if (np->gigabit_mode) {
21183 +         np->active_tx_desc->descr.buf = (unsigned char *) crisv32_intmem_virt_to_phys(buf);
21184 +       } else 
21185 +#endif
21186 +       {
21187 +         np->active_tx_desc->descr.buf = (unsigned char *) virt_to_phys(buf);
21188 +       }
21189 +         
21190 +       np->active_tx_desc->descr.after = np->active_tx_desc->descr.buf +
21191 +               length;
21192 +       np->active_tx_desc->descr.intr = 1;        
21193 +       np->active_tx_desc->descr.out_eop = 1;
21194 +
21195 +       /* Move eol. */
21196 +       np->active_tx_desc->descr.eol = 1;
21197 +       np->prev_tx_desc->descr.eol = 0;
21198 +
21199 +
21200 +       /* Update pointers. */
21201 +       np->prev_tx_desc = np->active_tx_desc;
21202 +       np->active_tx_desc = phys_to_virt((int)np->active_tx_desc->descr.next);
21203 +
21204 +       /* Start DMA. */
21205 +       crisv32_start_dma_out(np);
21206 +}
21207 +
21208 +static void
21209 +crisv32_start_dma_out(struct crisv32_ethernet_local* np)
21210 +{
21211 +       if (!np->sender_started) {
21212 +               /* Start DMA for the first time. */
21213 +               np->ctxt_out.saved_data_buf = np->prev_tx_desc->descr.buf;
21214 +               REG_WR(dma, np->dma_out_inst, rw_group_down,
21215 +                      (int) virt_to_phys(&np->ctxt_out));
21216 +               DMA_WR_CMD(np->dma_out_inst, regk_dma_load_c);
21217 +               DMA_WR_CMD(np->dma_out_inst, regk_dma_load_d | regk_dma_burst);
21218 +               np->sender_started = 1;
21219 +       } else {
21220 +               DMA_CONTINUE_DATA(np->dma_out_inst);
21221 +       }
21222 +}
21223 +
21224 +/* 
21225 + * Called by upper layers if they decide it took too long to complete sending
21226 + * a packet - we need to reset and stuff.
21227 + */
21228 +static void
21229 +crisv32_eth_tx_timeout(struct net_device *dev)
21230 +{
21231 +       struct crisv32_ethernet_local *np = netdev_priv(dev);
21232 +       reg_dma_rw_cfg cfg = {0};
21233 +       reg_dma_rw_stat stat = {0};
21234 +       unsigned long flags;
21235 +
21236 +       printk(KERN_WARNING "%s: transmit timed out\n", dev->name);
21237 +
21238 +       
21239 +       spin_lock_irqsave(&np->lock, flags);
21240 +       crisv32_ethernet_bug(dev);
21241 +
21242 +       np->txpackets = 0;
21243 +       /* Update error stats. */
21244 +       np->stats.tx_errors++; 
21245 +
21246 +       /* Reset the TX DMA in case it has hung on something. */
21247 +       cfg.en = 0;
21248 +       REG_WR(dma, np->dma_out_inst, rw_cfg, cfg);
21249 +       
21250 +       do {
21251 +               stat = REG_RD(dma, np->dma_out_inst, rw_stat);
21252 +       } while (stat.mode != regk_dma_rst);
21253 +
21254 +       /* Reset the transceiver. */
21255 +       crisv32_eth_reset_tranceiver(dev);
21256 +
21257 +       /* Get rid of the packets that never got an interrupt. */
21258 +       do {
21259 +               if (np->catch_tx_desc->skb)
21260 +                       dev_kfree_skb(np->catch_tx_desc->skb);
21261 +               
21262 +               np->catch_tx_desc->skb = 0;
21263 +               np->catch_tx_desc =
21264 +                       phys_to_virt((int)np->catch_tx_desc->descr.next);
21265 +       } while (np->catch_tx_desc != np->active_tx_desc);
21266 +
21267 +
21268 +       /* Start output DMA. */
21269 +       REG_WR(dma, np->dma_out_inst, rw_group_down,
21270 +              (int) virt_to_phys(&np->ctxt_out));
21271 +       DMA_WR_CMD(np->dma_out_inst, regk_dma_load_c);
21272 +       DMA_WR_CMD(np->dma_out_inst, regk_dma_load_d | regk_dma_burst);
21273 +       spin_unlock_irqrestore(&np->lock, flags);
21274 +
21275 +       /* Tell the upper layers we're ok again. */
21276 +       netif_wake_queue(dev);
21277 +}
21278 +
21279 +/*
21280 + * Set or clear the multicast filter for this adaptor.
21281 + * num_addrs == -1     Promiscuous mode, receive all packets
21282 + * num_addrs == 0      Normal mode, clear multicast list
21283 + * num_addrs > 0       Multicast mode, receive normal and MC packets,
21284 + *                     and do best-effort filtering.
21285 + */
21286 +static void
21287 +crisv32_eth_set_multicast_list(struct net_device *dev)
21288 +{
21289 +       int num_addr = dev->mc_count;
21290 +       unsigned long int lo_bits;
21291 +       unsigned long int hi_bits;
21292 +       reg_eth_rw_rec_ctrl rec_ctrl = {0};
21293 +       reg_eth_rw_ga_lo ga_lo = {0};
21294 +       reg_eth_rw_ga_hi ga_hi = {0};
21295 +       struct crisv32_ethernet_local *np = netdev_priv(dev);
21296 +
21297 +       if (dev->flags & IFF_PROMISC) {
21298 +               /* Promiscuous mode. */
21299 +               lo_bits = 0xfffffffful;
21300 +               hi_bits = 0xfffffffful;
21301 +
21302 +               /* Enable individual receive. */
21303 +               rec_ctrl = (reg_eth_rw_rec_ctrl) REG_RD(eth, np->eth_inst,
21304 +                                                       rw_rec_ctrl);
21305 +               rec_ctrl.individual = regk_eth_yes;
21306 +               REG_WR(eth, np->eth_inst, rw_rec_ctrl, rec_ctrl);
21307 +       } else if (dev->flags & IFF_ALLMULTI) {
21308 +               /* Enable all multicasts. */
21309 +               lo_bits = 0xfffffffful;
21310 +               hi_bits = 0xfffffffful;
21311 +
21312 +               /* Disable individual receive */
21313 +               rec_ctrl =
21314 +                 (reg_eth_rw_rec_ctrl) REG_RD(eth, np->eth_inst, rw_rec_ctrl);
21315 +               rec_ctrl.individual = regk_eth_no;
21316 +               REG_WR(eth, np->eth_inst, rw_rec_ctrl, rec_ctrl);
21317 +       } else if (num_addr == 0) {
21318 +               /* Normal, clear the mc list. */
21319 +               lo_bits = 0x00000000ul;
21320 +               hi_bits = 0x00000000ul;
21321 +
21322 +               /* Disable individual receive */
21323 +               rec_ctrl =
21324 +                 (reg_eth_rw_rec_ctrl) REG_RD(eth, np->eth_inst, rw_rec_ctrl);
21325 +               rec_ctrl.individual = regk_eth_no;
21326 +               REG_WR(eth, np->eth_inst, rw_rec_ctrl, rec_ctrl);
21327 +       } else {
21328 +               /* MC mode, receive normal and MC packets. */
21329 +               char hash_ix;
21330 +               struct dev_mc_list *dmi = dev->mc_list;
21331 +               int i;
21332 +               char *baddr;
21333 +               lo_bits = 0x00000000ul;
21334 +               hi_bits = 0x00000000ul;
21335 +               
21336 +               for (i = 0; i < num_addr; i++) {
21337 +                       /* Calculate the hash index for the GA registers. */
21338 +                       hash_ix = 0;
21339 +                       baddr = dmi->dmi_addr;
21340 +                       hash_ix ^= (*baddr) & 0x3f;
21341 +                       hash_ix ^= ((*baddr) >> 6) & 0x03;
21342 +                       ++baddr;
21343 +                       hash_ix ^= ((*baddr) << 2) & 0x03c;
21344 +                       hash_ix ^= ((*baddr) >> 4) & 0xf;
21345 +                       ++baddr;
21346 +                       hash_ix ^= ((*baddr) << 4) & 0x30;
21347 +                       hash_ix ^= ((*baddr) >> 2) & 0x3f;
21348 +                       ++baddr;
21349 +                       hash_ix ^= (*baddr) & 0x3f;
21350 +                       hash_ix ^= ((*baddr) >> 6) & 0x03;
21351 +                       ++baddr;
21352 +                       hash_ix ^= ((*baddr) << 2) & 0x03c;
21353 +                       hash_ix ^= ((*baddr) >> 4) & 0xf;
21354 +                       ++baddr;
21355 +                       hash_ix ^= ((*baddr) << 4) & 0x30;
21356 +                       hash_ix ^= ((*baddr) >> 2) & 0x3f;
21357 +
21358 +                       hash_ix &= 0x3f;
21359 +
21360 +                       if (hash_ix >= 32)
21361 +                               hi_bits |= (1 << (hash_ix - 32));
21362 +                       else
21363 +                               lo_bits |= (1 << hash_ix);
21364 +
21365 +                       dmi = dmi->next;
21366 +               }
21367 +               
21368 +               /* Disable individual receive. */
21369 +               rec_ctrl =
21370 +                 (reg_eth_rw_rec_ctrl) REG_RD(eth, np->eth_inst, rw_rec_ctrl);
21371 +               rec_ctrl.individual = regk_eth_no;
21372 +               REG_WR(eth, np->eth_inst, rw_rec_ctrl, rec_ctrl);
21373 +       }
21374 +
21375 +       ga_lo.tbl = (unsigned int) lo_bits;
21376 +       ga_hi.tbl = (unsigned int) hi_bits;
21377 +
21378 +       REG_WR(eth, np->eth_inst, rw_ga_lo, ga_lo);
21379 +       REG_WR(eth, np->eth_inst, rw_ga_hi, ga_hi);
21380 +}
21381 +
21382 +static int
21383 +crisv32_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
21384 +{
21385 +       struct mii_ioctl_data *data = if_mii(ifr);
21386 +       struct crisv32_ethernet_local *np = netdev_priv(dev);
21387 +       int old_autoneg;
21388 +
21389 +       spin_lock(&np->lock); /* Preempt protection */
21390 +       switch (cmd) {
21391 +               case SIOCGMIIPHY: /* Get PHY address */
21392 +                       data->phy_id = np->mdio_phy_addr;
21393 +                       break;
21394 +               case SIOCGMIIREG: /* Read MII register */
21395 +                       data->val_out = crisv32_eth_get_mdio_reg(dev,
21396 +                                                                data->reg_num);
21397 +                       break;
21398 +               case SIOCSMIIREG: /* Write MII register */
21399 +                       crisv32_eth_set_mdio_reg(dev, data->reg_num,
21400 +                                                data->val_in);
21401 +                       break;
21402 +               case SET_ETH_ENABLE_LEDS:
21403 +                       use_network_leds = 1;
21404 +                       break;
21405 +               case SET_ETH_DISABLE_LEDS:
21406 +                       use_network_leds = 0;
21407 +                       break;
21408 +               case SET_ETH_AUTONEG:
21409 +                       old_autoneg = autoneg_normal;
21410 +                       autoneg_normal = *(int*)data;
21411 +                       if (autoneg_normal != old_autoneg)
21412 +                               crisv32_eth_negotiate(dev);
21413 +                       break;
21414 +       default:
21415 +               spin_unlock(&np->lock); /* Preempt protection */
21416 +               return -EINVAL;
21417 +       }
21418 +       spin_unlock(&np->lock);
21419 +       return 0;
21420 +}
21421 +
21422 +static int crisv32_eth_get_settings(struct net_device *dev,
21423 +                            struct ethtool_cmd *ecmd)
21424 +{
21425 +       struct crisv32_ethernet_local *np = netdev_priv(dev);
21426 +       /* What about GMII and 1000xpause? not included in ethtool.h */
21427 +       ecmd->supported = SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII |
21428 +                         SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
21429 +                         SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full;
21430 +#ifdef CONFIG_CRIS_MACH_ARTPEC3
21431 +       ecmd->supported |= SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full;
21432 +#endif
21433 +       ecmd->port = PORT_TP;
21434 +       ecmd->transceiver = XCVR_EXTERNAL;
21435 +       ecmd->phy_address = np->mdio_phy_addr;
21436 +       ecmd->speed = np->current_speed;
21437 +       ecmd->duplex = np->full_duplex;
21438 +       ecmd->advertising = ADVERTISED_TP;
21439 +
21440 +       if (np->current_duplex == autoneg && np->current_speed_selection == 0)
21441 +               ecmd->advertising |= ADVERTISED_Autoneg;
21442 +       else {
21443 +               ecmd->advertising |=
21444 +                       ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
21445 +                       ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
21446 +#ifdef CONFIG_CRIS_MACH_ARTPEC3
21447 +               ecmd->advertising |= ADVERTISED_1000baseT_Half |
21448 +                       ADVERTISED_1000baseT_Full;
21449 +#endif
21450 +               if (np->current_speed_selection == 10)
21451 +                       ecmd->advertising &= ~(ADVERTISED_100baseT_Half |
21452 +                                              ADVERTISED_100baseT_Full |
21453 +                                              ADVERTISED_1000baseT_Half |
21454 +                                              ADVERTISED_1000baseT_Full);
21455 +
21456 +               else if (np->current_speed_selection == 100)
21457 +                       ecmd->advertising &= ~(ADVERTISED_10baseT_Half |
21458 +                                              ADVERTISED_10baseT_Full |
21459 +                                              ADVERTISED_1000baseT_Half |
21460 +                                              ADVERTISED_1000baseT_Full);
21461 +
21462 +               else if (np->current_speed_selection == 1000)
21463 +                       ecmd->advertising &= ~(ADVERTISED_10baseT_Half |
21464 +                                              ADVERTISED_10baseT_Full |
21465 +                                              ADVERTISED_100baseT_Half |
21466 +                                              ADVERTISED_100baseT_Full);
21467 +
21468 +               if (np->current_duplex == half)
21469 +                       ecmd->advertising &= ~(ADVERTISED_10baseT_Full |
21470 +                                              ADVERTISED_100baseT_Full |
21471 +                                              ADVERTISED_1000baseT_Full);
21472 +               else if (np->current_duplex == full)
21473 +                       ecmd->advertising &= ~(ADVERTISED_10baseT_Half |
21474 +                                              ADVERTISED_100baseT_Half |
21475 +                                              ADVERTISED_1000baseT_Half);
21476 +       }
21477 +
21478 +       ecmd->autoneg = AUTONEG_ENABLE;
21479 +       return 0;
21480 +}
21481 +
21482 +static int crisv32_eth_set_settings(struct net_device *dev,
21483 +                            struct ethtool_cmd *ecmd)
21484 +{
21485 +       if (ecmd->autoneg == AUTONEG_ENABLE) {
21486 +               crisv32_eth_set_duplex(dev, autoneg);
21487 +               crisv32_eth_set_speed(dev, 0);
21488 +       } else {
21489 +               crisv32_eth_set_duplex(dev, ecmd->duplex);
21490 +               crisv32_eth_set_speed(dev, ecmd->speed);
21491 +       }
21492 +
21493 +       return 0;
21494 +}
21495 +
21496 +static void crisv32_eth_get_drvinfo(struct net_device *dev,
21497 +                            struct ethtool_drvinfo *info)
21498 +{
21499 +       strncpy(info->driver, "ETRAX FS", sizeof(info->driver) - 1);
21500 +       strncpy(info->version, "$Revision: 1.96 $", sizeof(info->version) - 1);
21501 +       strncpy(info->fw_version, "N/A", sizeof(info->fw_version) - 1);
21502 +       strncpy(info->bus_info, "N/A", sizeof(info->bus_info) - 1);
21503 +}
21504 +
21505 +static int crisv32_eth_nway_reset(struct net_device *dev)
21506 +{
21507 +       struct crisv32_ethernet_local *np = netdev_priv(dev);
21508 +
21509 +       if (np->current_duplex == autoneg && np->current_speed_selection == 0)
21510 +               crisv32_eth_negotiate(dev);
21511 +       return 0;
21512 +}
21513 +
21514 +static struct ethtool_ops crisv32_ethtool_ops = {
21515 +       .get_settings   = crisv32_eth_get_settings,
21516 +       .set_settings   = crisv32_eth_set_settings,
21517 +       .get_drvinfo    = crisv32_eth_get_drvinfo,
21518 +       .nway_reset     = crisv32_eth_nway_reset,
21519 +       .get_link       = ethtool_op_get_link,
21520 +};
21521 +
21522 +/* Is this function really needed? Use ethtool instead? */
21523 +static int
21524 +crisv32_eth_set_config(struct net_device *dev, struct ifmap *map)
21525 +{
21526 +       struct crisv32_ethernet_local *np = netdev_priv(dev);
21527 +
21528 +       spin_lock(&np->lock); /* Preempt protection */
21529 +
21530 +       switch(map->port) {
21531 +               case IF_PORT_UNKNOWN:
21532 +                       /* Use autoneg */
21533 +                       crisv32_eth_set_speed(dev, 0);
21534 +                       crisv32_eth_set_duplex(dev, autoneg);
21535 +                       break;
21536 +               case IF_PORT_10BASET:
21537 +                       crisv32_eth_set_speed(dev, 10);
21538 +                       crisv32_eth_set_duplex(dev, autoneg);
21539 +                       break;
21540 +               case IF_PORT_100BASET:
21541 +               case IF_PORT_100BASETX:
21542 +                       crisv32_eth_set_speed(dev, 100);
21543 +                       crisv32_eth_set_duplex(dev, autoneg);
21544 +                       break;
21545 +               case IF_PORT_100BASEFX:
21546 +               case IF_PORT_10BASE2:
21547 +               case IF_PORT_AUI:
21548 +                       spin_unlock(&np->lock);
21549 +                       return -EOPNOTSUPP;
21550 +                       break;
21551 +               default:
21552 +                       printk(KERN_ERR "%s: Invalid media selected",
21553 +                              dev->name);
21554 +                       spin_unlock(&np->lock);
21555 +                       return -EINVAL;
21556 +       }
21557 +       spin_unlock(&np->lock);
21558 +       return 0;
21559 +}
21560 +
21561 +#ifdef CONFIG_CRIS_MACH_ARTPEC3
21562 +/* 
21563 + * Switch the behaviour of the tx and rx buffers using 
21564 + * external or internal memory. Usage of the internal 
21565 + * memory is required for gigabit operation.
21566 + */
21567 +static void 
21568 +crisv32_eth_switch_intmem_usage(struct net_device *dev) 
21569 +{
21570 +       struct crisv32_ethernet_local *np = netdev_priv(dev);
21571 +
21572 +       int i;
21573 +       reg_dma_rw_stat stat;
21574 +       reg_dma_rw_cfg cfg = {0};
21575 +       reg_dma_rw_intr_mask intr_mask_in = { .in_eop = regk_dma_yes };
21576 +       reg_dma_rw_ack_intr ack_intr = { .data = 1,.in_eop = 1 };
21577 +       unsigned char *intmem_tmp;
21578 +
21579 +       /* Notify the kernel that the interface has stopped */
21580 +       netif_stop_queue(dev);
21581 +
21582 +       /* Stop the receiver DMA */
21583 +       cfg.en = regk_dma_no;
21584 +       REG_WR(dma, np->dma_in_inst, rw_cfg, cfg);
21585 +
21586 +       if (!(np->gigabit_mode)) {
21587 +               /* deallocate SKBs in rx_desc */
21588 +               for (i = 0; i < NBR_RX_DESC; i++)
21589 +                       dev_kfree_skb(np->dma_rx_descr_list[i].skb);
21590 +
21591 +               /* Init TX*/
21592 +               for(i=0; i < NBR_INTMEM_TX_BUF; i++) {
21593 +                       /* Allocate internal memory */
21594 +                       intmem_tmp = NULL;
21595 +                       intmem_tmp = crisv32_intmem_alloc(MAX_MEDIA_DATA_SIZE,
21596 +                                                         32);
21597 +                       /* Check that we really got the memory */
21598 +                       if (intmem_tmp == NULL) {
21599 +                               printk(KERN_ERR "%s: Can't allocate intmem for"
21600 +                                      " RX buffer nbr: %d\n", dev->name, i);
21601 +                               return;
21602 +                       }
21603 +                       /* Setup the list entry */
21604 +                       np->tx_intmem_buf_list[i].free = 1;
21605 +                       np->tx_intmem_buf_list[i].buf = intmem_tmp;
21606 +                       np->tx_intmem_buf_list[i].next = &np->tx_intmem_buf_list[i + 1];
21607 +               }
21608 +               /* Setup the last list entry */
21609 +               np->tx_intmem_buf_list[NBR_INTMEM_TX_BUF - 1].next = &np->tx_intmem_buf_list[0];
21610 +               /* Setup initial pointer */
21611 +               np->intmem_tx_buf_active = np->tx_intmem_buf_list;
21612 +               np->intmem_tx_buf_catch = np->tx_intmem_buf_list;
21613 +
21614 +               /* Init RX */
21615 +               for (i=0; i < NBR_INTMEM_RX_DESC; i++) {
21616 +                       /* Allocate internal memory */
21617 +                       intmem_tmp = NULL;
21618 +                       intmem_tmp = crisv32_intmem_alloc(MAX_MEDIA_DATA_SIZE, 32);
21619 +                       /* Check that we really got the memory */
21620 +                       if (intmem_tmp == NULL) {
21621 +                               printk(KERN_ERR "%s: Can't allocate intmem for"
21622 +                                      " desc nbr: %d\n", dev->name, i);
21623 +                               return;
21624 +                       }
21625 +                       /* Setup the descriptors*/
21626 +                       np->dma_rx_descr_list[i].skb = NULL;
21627 +                       np->dma_rx_descr_list[i].descr.buf = 
21628 +                            (void *) crisv32_intmem_virt_to_phys(intmem_tmp);
21629 +                       np->dma_rx_descr_list[i].descr.after =
21630 +                            (void *) crisv32_intmem_virt_to_phys(intmem_tmp + MAX_MEDIA_DATA_SIZE);
21631 +                       np->dma_rx_descr_list[i].descr.eol = 0;
21632 +                       np->dma_rx_descr_list[i].descr.in_eop = 0;
21633 +                       np->dma_rx_descr_list[i].descr.next =
21634 +                            (void *) virt_to_phys(&np->dma_rx_descr_list[i+1].descr);
21635 +               }
21636 +               /* Setup the last rx descriptor */
21637 +               np->dma_rx_descr_list[NBR_INTMEM_RX_DESC - 1].descr.eol = 1;
21638 +               np->dma_rx_descr_list[NBR_INTMEM_RX_DESC - 1].descr.next =
21639 +                  (void*) virt_to_phys(&np->dma_rx_descr_list[0].descr);
21640 +               /* Initialise initial receive pointers. */
21641 +               np->active_rx_desc = &np->dma_rx_descr_list[0];
21642 +               np->prev_rx_desc = &np->dma_rx_descr_list[NBR_INTMEM_RX_DESC - 1];
21643 +               np->last_rx_desc = np->prev_rx_desc;
21644 +
21645 +               np->gigabit_mode = 1;
21646 +       } else {
21647 +               /* dealloc TX intmem */
21648 +               for(i=0; i < NBR_INTMEM_TX_BUF; i++)
21649 +                       crisv32_intmem_free(np->tx_intmem_buf_list[i].buf);
21650 +
21651 +               /* dealloc RX intmem */
21652 +               for (i=0; i < NBR_INTMEM_RX_DESC; i++)
21653 +                       crisv32_intmem_free(crisv32_intmem_phys_to_virt((unsigned long)np->dma_rx_descr_list[i].descr.buf));
21654 +
21655 +               /* Setup new rx_desc and alloc SKBs */
21656 +               for (i = 0; i < NBR_RX_DESC; i++) {
21657 +                       struct sk_buff *skb;
21658 +                       
21659 +                       skb = dev_alloc_skb(MAX_MEDIA_DATA_SIZE);
21660 +                       np->dma_rx_descr_list[i].skb = skb;
21661 +                       np->dma_rx_descr_list[i].descr.buf =
21662 +                               (char*)virt_to_phys(skb->data);
21663 +                       np->dma_rx_descr_list[i].descr.after =
21664 +                          (char*)virt_to_phys(skb->data + MAX_MEDIA_DATA_SIZE);
21665 +
21666 +                       np->dma_rx_descr_list[i].descr.eol = 0;
21667 +                       np->dma_rx_descr_list[i].descr.in_eop = 0;
21668 +                       np->dma_rx_descr_list[i].descr.next =
21669 +                          (void *) virt_to_phys(&np->dma_rx_descr_list[i + 1].descr);
21670 +               }
21671 +    
21672 +               np->dma_rx_descr_list[NBR_RX_DESC - 1].descr.eol = 1;
21673 +               np->dma_rx_descr_list[NBR_RX_DESC - 1].descr.next =
21674 +                  (void *) virt_to_phys(&np->dma_rx_descr_list[0].descr);
21675 +      
21676 +               /* Initialise initial receive pointers. */
21677 +               np->active_rx_desc = &np->dma_rx_descr_list[0];
21678 +               np->prev_rx_desc = &np->dma_rx_descr_list[NBR_RX_DESC - 1];
21679 +               np->last_rx_desc = np->prev_rx_desc;
21680 +
21681 +               np->gigabit_mode = 0;
21682 +       }
21683 +
21684 +       /* Fill context descriptors. */
21685 +       np->ctxt_in.next = 0;        
21686 +       np->ctxt_in.saved_data =
21687 +          (dma_descr_data *) virt_to_phys(&np->dma_rx_descr_list[0].descr);
21688 +       np->ctxt_in.saved_data_buf = np->dma_rx_descr_list[0].descr.buf;
21689 +
21690 +       /* Enable irq and make sure that the irqs are cleared. */
21691 +       REG_WR(dma, np->dma_in_inst, rw_intr_mask, intr_mask_in);
21692 +       REG_WR(dma, np->dma_in_inst, rw_ack_intr, ack_intr);
21693 +
21694 +       /* Start input dma */
21695 +       cfg.en = regk_dma_yes;
21696 +       REG_WR(dma, np->dma_in_inst, rw_cfg, cfg);
21697 +       REG_WR(dma, np->dma_in_inst, rw_group_down,
21698 +              (int) virt_to_phys(&np->ctxt_in));
21699 +
21700 +       DMA_WR_CMD(np->dma_in_inst, regk_dma_load_c);
21701 +       DMA_WR_CMD(np->dma_in_inst, regk_dma_load_d | regk_dma_burst);
21702 +
21703 +       netif_wake_queue(dev);
21704 +
21705 +       stat = REG_RD(dma, np->dma_in_inst, rw_stat);
21706 +}
21707 +#endif
21708 +
21709 +static void
21710 +crisv32_eth_negotiate(struct net_device *dev)
21711 +{
21712 +       unsigned short data =
21713 +           crisv32_eth_get_mdio_reg(dev, MII_ADVERTISE);
21714 +       unsigned short ctrl1000 =
21715 +           crisv32_eth_get_mdio_reg(dev, MII_CTRL1000);
21716 +       struct crisv32_ethernet_local *np = netdev_priv(dev);
21717 +
21718 +       /* Make all capabilities available */
21719 +       data |= ADVERTISE_10HALF | ADVERTISE_10FULL |
21720 +               ADVERTISE_100HALF | ADVERTISE_100FULL;
21721 +       ctrl1000 |= ADVERTISE_1000HALF | ADVERTISE_1000FULL;
21722 +
21723 +       /* Remove the speed capabilities that we do not want */
21724 +       switch (np->current_speed_selection) {
21725 +               case 10 :
21726 +                       data &= ~(ADVERTISE_100HALF | ADVERTISE_100FULL);
21727 +                       ctrl1000 &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
21728 +                       break;
21729 +               case 100 :
21730 +                       data &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL);
21731 +                       ctrl1000 &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
21732 +                       break;
21733 +               case 1000 :
21734 +                       data &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL |
21735 +                                 ADVERTISE_100HALF | ADVERTISE_100FULL);
21736 +                       break;
21737 +       }
21738 +
21739 +       /* Remove the duplex capabilities that we do not want */
21740 +       if (np->current_duplex == full) {
21741 +               data &= ~(ADVERTISE_10HALF | ADVERTISE_100HALF);
21742 +               ctrl1000 &= ~(ADVERTISE_1000HALF);
21743 +       }
21744 +       else if (np->current_duplex == half) {
21745 +               data &= ~(ADVERTISE_10FULL | ADVERTISE_100FULL);
21746 +               ctrl1000 &= ~(ADVERTISE_1000FULL);
21747 +       }
21748 +
21749 +       crisv32_eth_set_mdio_reg(dev, MII_ADVERTISE, data);
21750 +#ifdef CONFIG_CRIS_MACH_ARTPEC3
21751 +       crisv32_eth_set_mdio_reg(dev, MII_CTRL1000, ctrl1000);
21752 +#endif
21753 +
21754 +       /* Renegotiate with link partner */
21755 +       if (autoneg_normal) {
21756 +         data = crisv32_eth_get_mdio_reg(dev, MII_BMCR);
21757 +         data |= BMCR_ANENABLE | BMCR_ANRESTART;
21758 +       }
21759 +       crisv32_eth_set_mdio_reg(dev, MII_BMCR, data);
21760 +}
21761 +static void
21762 +crisv32_eth_check_speed(unsigned long idev)
21763 +{
21764 +       static int led_initiated = 0;
21765 +       struct net_device *dev = (struct net_device *) idev;
21766 +       struct crisv32_ethernet_local *np = netdev_priv(dev);
21767 +
21768 +       unsigned long data;
21769 +       int old_speed;
21770 +       unsigned long flags;
21771 +       
21772 +       BUG_ON(!np);
21773 +       BUG_ON(!np->transceiver);
21774 +
21775 +       spin_lock(&np->transceiver_lock);
21776 +
21777 +       old_speed = np->current_speed;
21778 +       data = crisv32_eth_get_mdio_reg(dev, MII_BMSR);
21779 +
21780 +       if (!(data & BMSR_LSTATUS))
21781 +               np->current_speed = 0;
21782 +       else
21783 +               np->transceiver->check_speed(dev);
21784 +
21785 +#ifdef CONFIG_CRIS_MACH_ARTPEC3
21786 +       if ((old_speed != np->current_speed)
21787 +           && ((old_speed == 1000) || (np->current_speed == 1000))) {
21788 +               /* Switch between mii and gmii */
21789 +               reg_eth_rw_gen_ctrl gen_ctrl = REG_RD(eth, np->eth_inst,
21790 +                                                     rw_gen_ctrl);
21791 +               reg_eth_rw_tr_ctrl tr_ctrl = REG_RD(eth, np->eth_inst,
21792 +                                                   rw_tr_ctrl);
21793 +               if (old_speed == 1000) {
21794 +                       gen_ctrl.phy = regk_eth_mii;
21795 +                       gen_ctrl.gtxclk_out = regk_eth_no;
21796 +                       tr_ctrl.carrier_ext = regk_eth_no;
21797 +               }
21798 +               else {
21799 +                       gen_ctrl.phy = regk_eth_gmii;
21800 +                       gen_ctrl.gtxclk_out = regk_eth_yes;
21801 +                       tr_ctrl.carrier_ext = regk_eth_yes;
21802 +               }
21803 +               REG_WR(eth, np->eth_inst, rw_tr_ctrl, tr_ctrl);
21804 +               REG_WR(eth, np->eth_inst, rw_gen_ctrl, gen_ctrl);
21805 +
21806 +               crisv32_eth_switch_intmem_usage(dev);
21807 +       }
21808 +#endif
21809 +
21810 +       spin_lock_irqsave(&np->leds->led_lock, flags);
21811 +       if ((old_speed != np->current_speed) || !led_initiated) {
21812 +               led_initiated = 1;
21813 +               np->leds->clear_led_timer.data = (unsigned long) dev;
21814 +               if (np->current_speed) {
21815 +                       netif_carrier_on(dev);
21816 +                       crisv32_set_network_leds(LED_LINK, dev);
21817 +               } else {
21818 +                       netif_carrier_off(dev);
21819 +                       crisv32_set_network_leds(LED_NOLINK, dev);
21820 +               }
21821 +       }
21822 +       spin_unlock_irqrestore(&np->leds->led_lock, flags);
21823 +
21824 +       /* Reinitialize the timer. */
21825 +       np->speed_timer.expires = jiffies + NET_LINK_UP_CHECK_INTERVAL;
21826 +       add_timer(&np->speed_timer);
21827 +
21828 +       spin_unlock(&np->transceiver_lock);
21829 +}
21830 +
21831 +static void
21832 +crisv32_eth_set_speed(struct net_device *dev, unsigned long speed)
21833 +{
21834 +       struct crisv32_ethernet_local *np = netdev_priv(dev);
21835 +       
21836 +       spin_lock(&np->transceiver_lock);
21837 +       if (np->current_speed_selection != speed) {
21838 +               np->current_speed_selection = speed;
21839 +               crisv32_eth_negotiate(dev);
21840 +       }
21841 +       spin_unlock(&np->transceiver_lock);
21842 +}
21843 +
21844 +static void
21845 +crisv32_eth_check_duplex(unsigned long idev)
21846 +{
21847 +       struct net_device *dev = (struct net_device *) idev;
21848 +       struct crisv32_ethernet_local *np = netdev_priv(dev);
21849 +       reg_eth_rw_rec_ctrl rec_ctrl;
21850 +       int old_duplex = np->full_duplex;
21851 +
21852 +       np->transceiver->check_duplex(dev);
21853 +       
21854 +       if (old_duplex != np->full_duplex) {
21855 +               /* Duplex changed. */
21856 +               rec_ctrl = (reg_eth_rw_rec_ctrl) REG_RD(eth, np->eth_inst,
21857 +                                                       rw_rec_ctrl);
21858 +               rec_ctrl.duplex = np->full_duplex;
21859 +               REG_WR(eth, np->eth_inst, rw_rec_ctrl, rec_ctrl);
21860 +       }
21861 +
21862 +       /* Reinitialize the timer. */
21863 +       np->duplex_timer.expires = jiffies + NET_DUPLEX_CHECK_INTERVAL;
21864 +       add_timer(&np->duplex_timer);
21865 +}
21866 +
21867 +static void
21868 +crisv32_eth_set_duplex(struct net_device *dev, enum duplex new_duplex)
21869 +{
21870 +       struct crisv32_ethernet_local *np = netdev_priv(dev);
21871 +       spin_lock(&np->transceiver_lock);
21872 +       if (np->current_duplex != new_duplex) {
21873 +               np->current_duplex = new_duplex;
21874 +               crisv32_eth_negotiate(dev);
21875 +       }
21876 +       spin_unlock(&np->transceiver_lock);
21877 +}
21878 +
21879 +static int
21880 +crisv32_eth_probe_transceiver(struct net_device *dev)
21881 +{
21882 +       unsigned int phyid_high;
21883 +       unsigned int phyid_low;
21884 +       unsigned int oui;
21885 +       struct transceiver_ops *ops = NULL;
21886 +       struct crisv32_ethernet_local *np = netdev_priv(dev);
21887 +
21888 +       /* Probe MDIO physical address. */
21889 +       for (np->mdio_phy_addr = 0;
21890 +            np->mdio_phy_addr <= 31; np->mdio_phy_addr++) {
21891 +               if (crisv32_eth_get_mdio_reg(dev, MII_BMSR) != 0xffff)
21892 +                       break;
21893 +       }
21894 +
21895 +       if (np->mdio_phy_addr == 32)
21896 +               return -ENODEV;
21897 +
21898 +       /* Get manufacturer. */
21899 +       phyid_high = crisv32_eth_get_mdio_reg(dev, MII_PHYSID1);
21900 +       phyid_low = crisv32_eth_get_mdio_reg(dev, MII_PHYSID2);
21901 +       
21902 +       oui = (phyid_high << 6) | (phyid_low >> 10);
21903 +
21904 +       for (ops = &transceivers[0]; ops->oui; ops++) {
21905 +               if (ops->oui == oui)
21906 +                       break;
21907 +       }
21908 +
21909 +       np->transceiver = ops;
21910 +       return 0;
21911 +}
21912 +
21913 +static void
21914 +generic_check_speed(struct net_device *dev)
21915 +{
21916 +       unsigned long data;
21917 +       struct crisv32_ethernet_local *np = netdev_priv(dev);
21918 +
21919 +       data = crisv32_eth_get_mdio_reg(dev, MII_ADVERTISE);
21920 +       if ((data & ADVERTISE_100FULL) ||
21921 +           (data & ADVERTISE_100HALF))
21922 +               np->current_speed = 100;
21923 +       else
21924 +               np->current_speed = 10; 
21925 +}
21926 +
21927 +static void
21928 +generic_check_duplex(struct net_device *dev)
21929 +{
21930 +       unsigned long data;
21931 +       struct crisv32_ethernet_local *np = netdev_priv(dev);
21932 +       
21933 +       data = crisv32_eth_get_mdio_reg(dev, MII_ADVERTISE);
21934 +       if ((data & ADVERTISE_10FULL) ||
21935 +           (data & ADVERTISE_100FULL))
21936 +               np->full_duplex = 1;
21937 +       else
21938 +               np->full_duplex = 0;
21939 +}
21940 +
21941 +static void
21942 +broadcom_check_speed(struct net_device *dev)
21943 +{
21944 +       unsigned long data;
21945 +       struct crisv32_ethernet_local *np = netdev_priv(dev);
21946 +
21947 +       data = crisv32_eth_get_mdio_reg(dev, MDIO_AUX_CTRL_STATUS_REG);
21948 +       np->current_speed = (data & MDIO_BC_SPEED ? 100 : 10);
21949 +}
21950 +
21951 +static void
21952 +broadcom_check_duplex(struct net_device *dev)
21953 +{
21954 +       unsigned long data;
21955 +       struct crisv32_ethernet_local *np = netdev_priv(dev);
21956 +
21957 +       data = crisv32_eth_get_mdio_reg(dev, MDIO_AUX_CTRL_STATUS_REG);        
21958 +       np->full_duplex = (data & MDIO_BC_FULL_DUPLEX_IND) ? 1 : 0;
21959 +}
21960 +
21961 +static void
21962 +tdk_check_speed(struct net_device *dev)
21963 +{
21964 +       unsigned long data;
21965 +       struct crisv32_ethernet_local *np = netdev_priv(dev);
21966 +       
21967 +       data = crisv32_eth_get_mdio_reg(dev, MDIO_TDK_DIAGNOSTIC_REG);
21968 +       np->current_speed = (data & MDIO_TDK_DIAGNOSTIC_RATE ? 100 : 10);
21969 +}
21970 +
21971 +static void
21972 +tdk_check_duplex(struct net_device *dev)
21973 +{
21974 +       unsigned long data;
21975 +       struct crisv32_ethernet_local *np = netdev_priv(dev);
21976 +
21977 +       data = crisv32_eth_get_mdio_reg(dev, MDIO_TDK_DIAGNOSTIC_REG);
21978 +       np->full_duplex = (data & MDIO_TDK_DIAGNOSTIC_DPLX) ? 1 : 0;
21979 +
21980 +}
21981 +
21982 +static void
21983 +intel_check_speed(struct net_device *dev)
21984 +{
21985 +       unsigned long data;
21986 +       struct crisv32_ethernet_local *np = netdev_priv(dev);
21987 +       data = crisv32_eth_get_mdio_reg(dev, MDIO_INT_STATUS_REG_2);
21988 +       np->current_speed = (data & MDIO_INT_SPEED ? 100 : 10);
21989 +}
21990 +
21991 +static void
21992 +intel_check_duplex(struct net_device *dev)
21993 +{
21994 +       unsigned long data;
21995 +       struct crisv32_ethernet_local *np = netdev_priv(dev);
21996 +       
21997 +       data = crisv32_eth_get_mdio_reg(dev, MDIO_INT_STATUS_REG_2);        
21998 +       np->full_duplex = (data & MDIO_INT_FULL_DUPLEX_IND) ? 1 : 0;
21999 +}
22000 +
22001 +static void
22002 +national_check_speed(struct net_device *dev)
22003 +{
22004 +       unsigned long data;
22005 +       struct crisv32_ethernet_local *np = netdev_priv(dev);
22006 +
22007 +       data = crisv32_eth_get_mdio_reg(dev, MDIO_NAT_LINK_AN_REG);
22008 +       if (data & MDIO_NAT_1000)
22009 +               np->current_speed = 1000;
22010 +       else if (data & MDIO_NAT_100)
22011 +               np->current_speed = 100;
22012 +       else
22013 +               np->current_speed = 10; 
22014 +}
22015 +
22016 +static void
22017 +national_check_duplex(struct net_device *dev)
22018 +{
22019 +       unsigned long data;
22020 +       struct crisv32_ethernet_local *np = netdev_priv(dev);
22021 +       
22022 +       data = crisv32_eth_get_mdio_reg(dev, MDIO_NAT_LINK_AN_REG);
22023 +       if (data & MDIO_NAT_FULL_DUPLEX_IND)
22024 +               np->full_duplex = 1;
22025 +       else
22026 +               np->full_duplex = 0;
22027 +}
22028 +
22029 +static void
22030 +crisv32_eth_reset_tranceiver(struct net_device *dev)
22031 +{
22032 +       int i;
22033 +       unsigned short cmd;
22034 +       unsigned short data;
22035 +       struct crisv32_ethernet_local *np = netdev_priv(dev);
22036 +
22037 +       data = crisv32_eth_get_mdio_reg(dev, MII_BMCR);
22038 +
22039 +       cmd = (MDIO_START << 14)
22040 +               | (MDIO_WRITE << 12)
22041 +               | (np->mdio_phy_addr << 7)
22042 +               | (MII_BMCR << 2);
22043 +
22044 +       crisv32_eth_send_mdio_cmd(dev, cmd, 1);
22045 +
22046 +       data |= 0x8000;
22047 +
22048 +       /* Magic value is number of bits. */
22049 +       for (i = 15; i >= 0; i--)
22050 +               crisv32_eth_send_mdio_bit(dev, GET_BIT(i, data));
22051 +}
22052 +
22053 +static unsigned short
22054 +crisv32_eth_get_mdio_reg(struct net_device *dev, unsigned char reg_num)
22055 +{
22056 +       int i;
22057 +       unsigned short cmd;     /* Data to be sent on MDIO port. */
22058 +       unsigned short data;    /* Data read from MDIO. */
22059 +       struct crisv32_ethernet_local *np = netdev_priv(dev);
22060 +
22061 +       /* Start of frame, OP Code, Physical Address, Register Address. */
22062 +       cmd = (MDIO_START << 14)
22063 +               | (MDIO_READ << 12)
22064 +               | (np->mdio_phy_addr << 7)
22065 +               | (reg_num << 2);
22066 +
22067 +       crisv32_eth_send_mdio_cmd(dev, cmd, 0);
22068 +
22069 +       data = 0;
22070 +
22071 +       /* Receive data. Magic value is number of bits. */
22072 +       for (i = 15; i >= 0; i--)
22073 +               data |= (crisv32_eth_receive_mdio_bit(dev) << i);
22074 +
22075 +       return data;
22076 +}
22077 +
22078 +static void
22079 +crisv32_eth_set_mdio_reg(struct net_device *dev, unsigned char reg, int value)
22080 +{
22081 +       int bitCounter;
22082 +       unsigned short cmd;
22083 +       struct crisv32_ethernet_local *np = netdev_priv(dev);
22084 +
22085 +       cmd = (MDIO_START << 14)
22086 +               | (MDIO_WRITE << 12)
22087 +               | (np->mdio_phy_addr << 7)
22088 +               | (reg << 2);
22089 +       
22090 +       crisv32_eth_send_mdio_cmd(dev, cmd, 1);
22091 +
22092 +       /* Data... */
22093 +       for (bitCounter=15; bitCounter>=0 ; bitCounter--) {
22094 +               crisv32_eth_send_mdio_bit(dev, GET_BIT(bitCounter, value));
22095 +       }
22096 +}
22097 +
22098 +static void
22099 +crisv32_eth_send_mdio_cmd(struct net_device *dev, unsigned short cmd,
22100 +                         int write_cmd)
22101 +{
22102 +       int i;
22103 +       unsigned char data = 0x2;
22104 +
22105 +       /* Preamble. Magic value is number of bits. */
22106 +       for (i = 31; i >= 0; i--)
22107 +               crisv32_eth_send_mdio_bit(dev, GET_BIT(i, MDIO_PREAMBLE));
22108 +
22109 +       for (i = 15; i >= 2; i--)
22110 +               crisv32_eth_send_mdio_bit(dev, GET_BIT(i, cmd));
22111 +
22112 +       /* Turnaround. */
22113 +       for (i = 1; i >= 0; i--)
22114 +               if (write_cmd)
22115 +                       crisv32_eth_send_mdio_bit(dev, GET_BIT(i, data));
22116 +               else
22117 +                       crisv32_eth_receive_mdio_bit(dev);
22118 +}
22119 +
22120 +static void
22121 +crisv32_eth_send_mdio_bit(struct net_device *dev, unsigned char bit)
22122 +{
22123 +       struct crisv32_ethernet_local *np = netdev_priv(dev);
22124 +       
22125 +       reg_eth_rw_mgm_ctrl mgm_ctrl = {
22126 +               .mdoe = regk_eth_yes,
22127 +               .mdio = bit & 1
22128 +       };
22129 +
22130 +       REG_WR(eth, np->eth_inst, rw_mgm_ctrl, mgm_ctrl);
22131 +
22132 +       udelay(1);
22133 +
22134 +       mgm_ctrl.mdc = 1;
22135 +       REG_WR(eth, np->eth_inst, rw_mgm_ctrl, mgm_ctrl);
22136 +
22137 +       udelay(1);
22138 +}
22139 +
22140 +static unsigned char
22141 +crisv32_eth_receive_mdio_bit(struct net_device *dev)
22142 +{
22143 +       reg_eth_r_stat stat;
22144 +       reg_eth_rw_mgm_ctrl mgm_ctrl = {0};
22145 +       struct crisv32_ethernet_local *np = netdev_priv(dev);
22146 +
22147 +       REG_WR(eth, np->eth_inst, rw_mgm_ctrl, mgm_ctrl);
22148 +       stat = REG_RD(eth, np->eth_inst, r_stat);
22149 +
22150 +       udelay(1);
22151 +
22152 +       mgm_ctrl.mdc = 1;
22153 +       REG_WR(eth, np->eth_inst, rw_mgm_ctrl, mgm_ctrl);
22154 +
22155 +       udelay(1);
22156 +       return stat.mdio;
22157 +}
22158 +
22159 +static void
22160 +crisv32_clear_network_leds(unsigned long priv)
22161 +{
22162 +       struct net_device *dev = (struct net_device*)priv;
22163 +       struct crisv32_ethernet_local *np = netdev_priv(dev);
22164 +       unsigned long flags;
22165 +
22166 +       spin_lock_irqsave(&np->leds->led_lock, flags);
22167 +       if (np->leds->led_active && time_after(jiffies, np->leds->led_next_time)) {
22168 +               crisv32_set_network_leds(LED_NOACTIVITY, dev);
22169 +
22170 +               /* Set the earliest time we may set the LED */
22171 +               np->leds->led_next_time = jiffies + NET_FLASH_PAUSE;
22172 +               np->leds->led_active = 0;
22173 +       }
22174 +       spin_unlock_irqrestore(&np->leds->led_lock, flags);
22175 +}
22176 +
22177 +static void
22178 +crisv32_set_network_leds(int active, struct net_device *dev)
22179 +{
22180 +       struct crisv32_ethernet_local *np = netdev_priv(dev);
22181 +       int light_leds = 0;
22182 +
22183 +       if (np->leds->ledgrp == LED_GRP_NONE)
22184 +         return;
22185 +
22186 +       if (active == LED_NOLINK) {
22187 +               if (dev == crisv32_dev[0])
22188 +                       np->leds->ifisup[0] = 0;
22189 +               else
22190 +                       np->leds->ifisup[1] = 0;
22191 +       }
22192 +       else if (active == LED_LINK) {
22193 +               if (dev == crisv32_dev[0])
22194 +                       np->leds->ifisup[0] = 1;
22195 +               else
22196 +                       np->leds->ifisup[1] = 1;
22197 +#if defined(CONFIG_ETRAX_NETWORK_LED_ON_WHEN_LINK) 
22198 +               light_leds = 1;
22199 +       } else {
22200 +               light_leds = (active == LED_NOACTIVITY);
22201 +#elif defined(CONFIG_ETRAX_NETWORK_LED_ON_WHEN_ACTIVITY)
22202 +               light_leds = 0;
22203 +       } else {
22204 +               light_leds = (active == LED_ACTIVITY);
22205 +#else
22206 +#error "Define either CONFIG_ETRAX_NETWORK_LED_ON_WHEN_LINK or CONFIG_ETRAX_NETWORK_LED_ON_WHEN_ACTIVITY"
22207 +#endif 
22208 +       }
22209 +
22210 +       if (!use_network_leds) {
22211 +               NET_LED_SET(np->leds->ledgrp,LED_OFF);
22212 +               return;
22213 +       }
22214 +
22215 +       if (!np->current_speed) {
22216 +               /* Set link down if none of the interfaces that use this led group is up */
22217 +               if ((np->leds->ifisup[0] + np->leds->ifisup[1]) == 0) {
22218 +#if defined(CONFIG_ETRAX_NETWORK_RED_ON_NO_CONNECTION)
22219 +                       /* Make LED red, link is down */
22220 +                       NET_LED_SET(np->leds->ledgrp,LED_RED);
22221 +#else
22222 +                       NET_LED_SET(np->leds->ledgrp,LED_OFF);
22223 +#endif 
22224 +               }
22225 +       }
22226 +       else if (light_leds) {
22227 +               if (np->current_speed == 10) {
22228 +                       NET_LED_SET(np->leds->ledgrp,LED_ORANGE);
22229 +               } else {
22230 +                       NET_LED_SET(np->leds->ledgrp,LED_GREEN);
22231 +               }
22232 +       }
22233 +       else {
22234 +               NET_LED_SET(np->leds->ledgrp,LED_OFF);
22235 +       }
22236 +}
22237 +
22238 +#ifdef CONFIG_NET_POLL_CONTROLLER
22239 +static void
22240 +crisv32_netpoll(struct net_device* netdev)
22241 +{
22242 +       crisv32rx_eth_interrupt(DMA0_INTR_VECT, netdev, NULL);
22243 +}
22244 +#endif
22245 +
22246 +#ifdef CONFIG_CPU_FREQ
22247 +static int
22248 +crisv32_ethernet_freq_notifier(struct notifier_block *nb,
22249 +                              unsigned long val, void *data)
22250 +{
22251 +       struct cpufreq_freqs *freqs = data;
22252 +       if (val == CPUFREQ_POSTCHANGE) {
22253 +               int i;
22254 +               for (i = 0; i < 2; i++) {
22255 +                       struct net_device* dev = crisv32_dev[i];
22256 +                       unsigned short data;
22257 +                       if (dev == NULL)
22258 +                               continue;
22259 +
22260 +                       data = crisv32_eth_get_mdio_reg(dev, MII_BMCR);
22261 +                       if (freqs->new == 200000)
22262 +                               data &= ~BMCR_PDOWN;
22263 +                       else
22264 +                               data |= BMCR_PDOWN;
22265 +                       crisv32_eth_set_mdio_reg(dev, MII_BMCR, data);
22266 +               }
22267 +       }
22268 +       return 0;
22269 +}
22270 +#endif
22271 +
22272 +/*
22273 + * Must be called with the np->lock held.
22274 + */
22275 +static void crisv32_ethernet_bug(struct net_device *dev)
22276 +{
22277 +       struct crisv32_ethernet_local *np = netdev_priv(dev);
22278 +       dma_descr_data *dma_pos;
22279 +       dma_descr_data *in_dma_pos;
22280 +       reg_dma_rw_stat stat = {0};
22281 +       reg_dma_rw_stat in_stat = {0};
22282 +       int i;
22283 +               
22284 +       /* Get the current output dma position. */
22285 +       stat = REG_RD(dma, np->dma_out_inst, rw_stat);
22286 +       dma_pos = phys_to_virt(REG_RD_INT(dma, np->dma_out_inst, rw_data));
22287 +       in_stat = REG_RD(dma, np->dma_in_inst, rw_stat);
22288 +       in_dma_pos = phys_to_virt(REG_RD_INT(dma, np->dma_in_inst, rw_data));
22289 +       
22290 +       printk("%s:\n"
22291 +              "stat.list_state=%x\n"
22292 +              "stat.mode=%x\n"
22293 +              "stat.stream_cmd_src=%x\n"
22294 +              "dma_pos=%x\n"
22295 +              "in_stat.list_state=%x\n"
22296 +              "in_stat.mode=%x\n"
22297 +              "in_stat.stream_cmd_src=%x\n"
22298 +              "in_dma_pos=%x\n"
22299 +              "catch=%x active=%x\n"
22300 +              "packets=%d queue=%d\n"
22301 +              "intr_vect.r_vect=%x\n"
22302 +              "dma.r_masked_intr=%x dma.rw_ack_intr=%x "
22303 +              "dma.r_intr=%x dma.rw_intr_masked=%x\n"
22304 +              "eth.r_stat=%x\n",
22305 +              __func__,
22306 +              stat.list_state, stat.mode, stat.stream_cmd_src,
22307 +              (unsigned int)dma_pos,
22308 +              in_stat.list_state, in_stat.mode, in_stat.stream_cmd_src,
22309 +              (unsigned int)in_dma_pos,
22310 +              (unsigned int)&np->catch_tx_desc->descr,
22311 +              (unsigned int)&np->active_tx_desc->descr,
22312 +              np->txpackets,
22313 +              netif_queue_stopped(dev),
22314 +              REG_RD_INT(intr_vect, regi_irq, r_vect),
22315 +              REG_RD_INT(dma, np->dma_out_inst, r_masked_intr),
22316 +              REG_RD_INT(dma, np->dma_out_inst, rw_ack_intr),
22317 +              REG_RD_INT(dma, np->dma_out_inst, r_intr),
22318 +              REG_RD_INT(dma, np->dma_out_inst, rw_intr_mask),
22319 +              REG_RD_INT(eth, np->eth_inst, r_stat));
22320 +
22321 +       printk("tx-descriptors:\n");
22322 +       for (i = 0; i < NBR_TX_DESC; i++) {
22323 +               printk("txdesc[%d]=0x%x\n", i, (unsigned int)
22324 +                      virt_to_phys(&np->dma_tx_descr_list[i].descr));
22325 +               printk("txdesc[%d].skb=0x%x\n", i,
22326 +                      (unsigned int)np->dma_tx_descr_list[i].skb);
22327 +               printk("txdesc[%d].buf=0x%x\n", i,
22328 +                      (unsigned int)np->dma_tx_descr_list[i].descr.buf);
22329 +               printk("txdesc[%d].after=0x%x\n", i,
22330 +                      (unsigned int)np->dma_tx_descr_list[i].descr.after);
22331 +               printk("txdesc[%d].intr=%x\n", i,
22332 +                      np->dma_tx_descr_list[i].descr.intr);
22333 +               printk("txdesc[%d].eol=%x\n", i,
22334 +                      np->dma_tx_descr_list[i].descr.eol);
22335 +               printk("txdesc[%d].out_eop=%x\n", i,
22336 +                      np->dma_tx_descr_list[i].descr.out_eop);
22337 +               printk("txdesc[%d].wait=%x\n", i,
22338 +                      np->dma_tx_descr_list[i].descr.wait);
22339 +       }
22340 +}
22341 +
22342 +
22343 +static int
22344 +crisv32_init_module(void)
22345 +{
22346 +       return crisv32_ethernet_init();
22347 +}
22348 +
22349 +module_init(crisv32_init_module);
22350 diff -urN linux-2.6.19.2.orig/drivers/net/cris/eth_v32.h linux-2.6.19.2.dev/drivers/net/cris/eth_v32.h
22351 --- linux-2.6.19.2.orig/drivers/net/cris/eth_v32.h      1970-01-01 01:00:00.000000000 +0100
22352 +++ linux-2.6.19.2.dev/drivers/net/cris/eth_v32.h       2007-02-06 11:10:37.000000000 +0100
22353 @@ -0,0 +1,248 @@
22354 +/*
22355 + * Definitions for ETRAX FS ethernet driver.
22356 + *
22357 + * Copyright (C) 2003, 2004, 2005 Axis Communications.
22358 + */
22359 +
22360 +#ifndef _ETRAX_ETHERNET_H_
22361 +#define _ETRAX_ETHERNET_H_
22362 +
22363 +#include <asm/arch/hwregs/dma.h>
22364 +
22365 +
22366 +#define MAX_MEDIA_DATA_SIZE 1522       /* Max packet size. */
22367 +
22368 +#define NBR_RX_DESC 64                 /* Number of RX descriptors. */
22369 +#define NBR_TX_DESC 16                 /* Number of TX descriptors. */
22370 +#ifdef CONFIG_CRIS_MACH_ARTPEC3
22371 +#define NBR_INTMEM_RX_DESC 5           /* Number of RX descriptors in int. mem.
22372 +                                        * when running in gigabit mode.
22373 +                                        * Should be less than NBR_RX_DESC 
22374 +                                        */
22375 +#define NBR_INTMEM_TX_BUF 4            /* Number of TX buffers in int. mem
22376 +                                        * when running in gigabit mode.
22377 +                                        * Should be less than NBR_TX_DESC
22378 +                                        */
22379 +#endif
22380 +
22381 +/* Large packets are sent directly to upper layers while small packets
22382 + * are copied (to reduce memory waste).  The following constant
22383 + * decides the breakpoint.
22384 + */
22385 +#define RX_COPYBREAK        (256)
22386 +
22387 +#define ETHER_HEAD_LEN      (14)
22388 +
22389 +/* 
22390 +** MDIO constants.
22391 +*/
22392 +#define MDIO_START                          0x1
22393 +#define MDIO_READ                           0x2
22394 +#define MDIO_WRITE                          0x1
22395 +#define MDIO_PREAMBLE              0xfffffffful
22396 +
22397 +/* Broadcom specific */
22398 +#define MDIO_AUX_CTRL_STATUS_REG           0x18
22399 +#define MDIO_BC_FULL_DUPLEX_IND             0x1
22400 +#define MDIO_BC_SPEED                       0x2
22401 +
22402 +/* TDK specific */
22403 +#define MDIO_TDK_DIAGNOSTIC_REG              18
22404 +#define MDIO_TDK_DIAGNOSTIC_RATE          0x400
22405 +#define MDIO_TDK_DIAGNOSTIC_DPLX          0x800
22406 +
22407 +/*Intel LXT972A specific*/
22408 +#define MDIO_INT_STATUS_REG_2             0x0011
22409 +#define MDIO_INT_FULL_DUPLEX_IND ( 0x0001 << 9  )
22410 +#define MDIO_INT_SPEED          ( 0x0001 << 14 )
22411 +
22412 +/*National Semiconductor DP83865 specific*/
22413 +#define MDIO_NAT_LINK_AN_REG              0x11
22414 +#define MDIO_NAT_1000            (0x0001 << 4)
22415 +#define MDIO_NAT_100             (0x0001 << 3)
22416 +#define MDIO_NAT_FULL_DUPLEX_IND (0x0001 << 1)
22417 +
22418 +/* Network flash constants */
22419 +#define NET_FLASH_TIME                  (HZ/50) /* 20 ms */
22420 +#define NET_FLASH_PAUSE                 (HZ/100) /* 10 ms */
22421 +#define NET_LINK_UP_CHECK_INTERVAL     (2*HZ)  /* 2 seconds. */
22422 +#define NET_DUPLEX_CHECK_INTERVAL      (2*HZ)  /* 2 seconds. */
22423 +
22424 +/* Duplex settings. */
22425 +enum duplex {
22426 +       half,
22427 +       full,
22428 +       autoneg
22429 +};
22430 +
22431 +/* Some transceivers require special handling. */
22432 +struct transceiver_ops {
22433 +       unsigned int oui;
22434 +       void (*check_speed) (struct net_device * dev);
22435 +       void (*check_duplex) (struct net_device * dev);
22436 +};
22437 +
22438 +typedef struct crisv32_eth_descr {
22439 +       dma_descr_data descr __attribute__ ((__aligned__(32)));
22440 +       struct sk_buff *skb;
22441 +       unsigned char *linearized_packet;
22442 +} crisv32_eth_descr;
22443 +
22444 +
22445 +
22446 +#ifdef CONFIG_CRIS_MACH_ARTPEC3
22447 +struct tx_buffer_list {
22448 +  struct tx_buffer_list *next;
22449 +  unsigned char *buf;
22450 +  char free;
22451 +};
22452 +#endif
22453 +
22454 +/* LED stuff */
22455 +#define LED_GRP_0 0
22456 +#define LED_GRP_1 1
22457 +#define LED_GRP_NONE 2
22458 +
22459 +#define LED_ACTIVITY   0
22460 +#define LED_NOACTIVITY 1
22461 +#define LED_LINK       2
22462 +#define LED_NOLINK     3
22463 +
22464 +struct crisv32_eth_leds {
22465 +       unsigned int ledgrp;
22466 +       int led_active;
22467 +       unsigned long led_next_time;
22468 +       struct timer_list clear_led_timer;
22469 +       spinlock_t led_lock; /* Protect LED state */
22470 +       int ifisup[2];
22471 +};
22472 +
22473 +#define NET_LED_SET(x,y)                               \
22474 +       do {                                            \
22475 +               if (x == 0) LED_NETWORK_GRP0_SET(y);    \
22476 +               if (x == 1) LED_NETWORK_GRP1_SET(y);    \
22477 +       } while (0)
22478 +
22479 +/* Information that need to be kept for each device. */
22480 +struct crisv32_ethernet_local {
22481 +       dma_descr_context ctxt_in __attribute__ ((__aligned__(32)));
22482 +       dma_descr_context ctxt_out __attribute__ ((__aligned__(32)));
22483 +
22484 +       crisv32_eth_descr *active_rx_desc;
22485 +       crisv32_eth_descr *prev_rx_desc;
22486 +       crisv32_eth_descr *last_rx_desc;
22487 +
22488 +       crisv32_eth_descr *active_tx_desc;
22489 +       crisv32_eth_descr *prev_tx_desc;
22490 +       crisv32_eth_descr *catch_tx_desc;
22491 +
22492 +       crisv32_eth_descr dma_rx_descr_list[NBR_RX_DESC];
22493 +       crisv32_eth_descr dma_tx_descr_list[NBR_TX_DESC];
22494 +#ifdef CONFIG_CRIS_MACH_ARTPEC3
22495 +       struct tx_buffer_list tx_intmem_buf_list[NBR_INTMEM_TX_BUF];
22496 +       struct tx_buffer_list *intmem_tx_buf_active;
22497 +       struct tx_buffer_list *intmem_tx_buf_catch;
22498 +       char gigabit_mode;
22499 +#endif
22500 +       char new_rx_package;
22501 +
22502 +       /* DMA and ethernet registers for the device. */
22503 +       int eth_inst;
22504 +       int dma_in_inst;
22505 +       int dma_out_inst;
22506 +
22507 +       /* Network speed indication. */
22508 +       struct timer_list speed_timer;
22509 +       int current_speed;              /* Speed read from transceiver */
22510 +       int current_speed_selection;    /* Speed selected by user */
22511 +       int sender_started;
22512 +       int txpackets;
22513 +
22514 +       struct crisv32_eth_leds *leds;
22515 +
22516 +       /* Duplex. */
22517 +       struct timer_list duplex_timer;
22518 +       int full_duplex;
22519 +       enum duplex current_duplex;
22520 +
22521 +       struct net_device_stats stats;
22522 +
22523 +       /* Transceiver address. */
22524 +       unsigned int mdio_phy_addr;
22525 +
22526 +       struct transceiver_ops *transceiver;
22527 +
22528 +       /* 
22529 +        * TX control lock. This protects the transmit buffer ring state along
22530 +        * with the "tx full" state of the driver.  This means all netif_queue
22531 +        * flow control actions are protected by this lock as well.
22532 +        */
22533 +       spinlock_t lock;
22534 +       spinlock_t transceiver_lock; /* Protect transceiver state. */
22535 +};
22536 +
22537 +/* Function prototypes. */
22538 +static int crisv32_ethernet_init(void);
22539 +static int crisv32_ethernet_device_init(struct net_device* dev);
22540 +static int crisv32_eth_open(struct net_device *dev);
22541 +static int crisv32_eth_close(struct net_device *dev);
22542 +static int crisv32_eth_set_mac_address(struct net_device *dev, void *vpntr);
22543 +static irqreturn_t crisv32rx_eth_interrupt(int irq, void *dev_id);
22544 +static irqreturn_t crisv32tx_eth_interrupt(int irq, void *dev_id);
22545 +static irqreturn_t crisv32nw_eth_interrupt(int irq, void *dev_id);
22546 +static void crisv32_eth_receive_packet(struct net_device *dev);
22547 +static int crisv32_eth_send_packet(struct sk_buff *skb, struct net_device *dev);
22548 +static void crisv32_eth_hw_send_packet(unsigned char *buf, int length,
22549 +                                      void *priv);
22550 +static void crisv32_eth_tx_timeout(struct net_device *dev);
22551 +static void crisv32_eth_set_multicast_list(struct net_device *dev);
22552 +static int crisv32_eth_ioctl(struct net_device *dev, struct ifreq *ifr,
22553 +                            int cmd);
22554 +static int crisv32_eth_set_config(struct net_device* dev, struct ifmap* map);
22555 +#ifdef CONFIG_CRIS_MACH_ARTPEC3
22556 +static void crisv32_eth_switch_intmem_usage(struct net_device *dev);
22557 +#endif
22558 +static void crisv32_eth_negotiate(struct net_device *dev);
22559 +static void crisv32_eth_check_speed(unsigned long idev);
22560 +static void crisv32_eth_set_speed(struct net_device *dev, unsigned long speed);
22561 +static void crisv32_eth_check_duplex(unsigned long idev);
22562 +static void crisv32_eth_set_duplex(struct net_device *dev, enum duplex);
22563 +static int crisv32_eth_probe_transceiver(struct net_device *dev);
22564 +
22565 +static struct ethtool_ops crisv32_ethtool_ops;
22566 +
22567 +static void generic_check_speed(struct net_device *dev);
22568 +static void generic_check_duplex(struct net_device *dev);
22569 +static void broadcom_check_speed(struct net_device *dev);
22570 +static void broadcom_check_duplex(struct net_device *dev);
22571 +static void tdk_check_speed(struct net_device *dev);
22572 +static void tdk_check_duplex(struct net_device *dev);
22573 +static void intel_check_speed(struct net_device* dev);
22574 +static void intel_check_duplex(struct net_device *dev);
22575 +static void national_check_speed(struct net_device* dev);
22576 +static void national_check_duplex(struct net_device *dev);
22577 +
22578 +#ifdef CONFIG_NET_POLL_CONTROLLER
22579 +static void crisv32_netpoll(struct net_device* dev);
22580 +#endif
22581 +
22582 +static void crisv32_clear_network_leds(unsigned long dummy);
22583 +static void crisv32_set_network_leds(int active, struct net_device* dev);
22584 +
22585 +static void crisv32_eth_reset_tranceiver(struct net_device *dev);
22586 +static unsigned short crisv32_eth_get_mdio_reg(struct net_device *dev,
22587 +                                              unsigned char reg_num);
22588 +static void crisv32_eth_set_mdio_reg(struct net_device *dev,
22589 +                                     unsigned char reg_num,
22590 +                                     int val);
22591 +static void crisv32_eth_send_mdio_cmd(struct net_device *dev,
22592 +                                     unsigned short cmd, int write_cmd);
22593 +static void crisv32_eth_send_mdio_bit(struct net_device *dev,
22594 +                                     unsigned char bit);
22595 +static unsigned char crisv32_eth_receive_mdio_bit(struct net_device *dev);
22596 +
22597 +static struct net_device_stats *crisv32_get_stats(struct net_device *dev);
22598 +static void crisv32_start_dma_out(struct crisv32_ethernet_local* np);
22599 +
22600 +
22601 +#endif /* _ETRAX_ETHERNET_H_ */