// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Driver for the Macintosh 68K onboard MACE controller with PSC
 *	driven DMA. The MACE driver code is derived from mace.c. The
 *	Mac68k theory of operation is courtesy of the MacBSD wizards.
 *
 *	Copyright (C) 1996 Paul Mackerras.
 *	Copyright (C) 1998 Alan Cox <alan@lxorguk.ukuu.org.uk>
 *
 *	Modified heavily by Joshua M. Thompson based on Dave Huang's NetBSD driver
 *
 *	Copyright (C) 2007 Finn Thain
 *
 *	Converted to DMA API, converted to unified driver model,
 *	sync'd some routines with mace.c and fixed various bugs.
 */


#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/crc32.h>
#include <linux/bitrev.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <asm/io.h>
#include <asm/macints.h>
#include <asm/mac_psc.h>
#include <asm/page.h>
#include "mace.h"

static char mac_mace_string[] = "macmace";

#define N_TX_BUFF_ORDER	0
#define N_TX_RING	(1 << N_TX_BUFF_ORDER)
#define N_RX_BUFF_ORDER	3
#define N_RX_RING	(1 << N_RX_BUFF_ORDER)

#define TX_TIMEOUT	HZ

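/* Each ring slot is 2 KB, comfortably larger than a maximum-length Ethernet frame */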
#define MACE_BUFF_SIZE	0x800

/* Chip revision that needs a workaround when changing the hardware or multicast address */
#define BROKEN_ADDRCHG_REV	0x0941

/* The MACE is simply wired down on a Mac68K box */

#define MACE_BASE	(void *)(0x50F1C000)
#define MACE_PROM	(void *)(0x50F08001)

struct mace_data {
	volatile struct mace *mace;	/* MACE chip registers */
	unsigned char *tx_ring;		/* coherent DMA transmit buffer */
	dma_addr_t tx_ring_phys;
	unsigned char *rx_ring;		/* coherent DMA receive ring */
	dma_addr_t rx_ring_phys;
	int dma_intr;			/* PSC DMA interrupt */
	int rx_slot, rx_tail;		/* active PSC register set, next buffer to reap */
	int tx_slot, tx_sloti, tx_count; /* sets for next/pending transmit, free buffers */
	int chipid;			/* MACE chip revision */
	struct device *device;
};

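/*
 * Receive frame header as the PSC DMA engine appears to deposit it in the
 * ring buffer: each MACE status byte is followed by a pad byte, then two
 * longwords of padding, then the frame data itself.
 */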
struct mace_frame {
	u8	rcvcnt;
	u8	pad1;
	u8	rcvsts;
	u8	pad2;
	u8	rntpc;
	u8	pad3;
	u8	rcvcc;
	u8	pad4;
	u32	pad5;
	u32	pad6;
	u8	data[1];
	/* And frame continues.. */
};

#define PRIV_BYTES	sizeof(struct mace_data)

static int mace_open(struct net_device *dev);
static int mace_close(struct net_device *dev);
static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
static void mace_set_multicast(struct net_device *dev);
static int mace_set_address(struct net_device *dev, void *addr);
static void mace_reset(struct net_device *dev);
static irqreturn_t mace_interrupt(int irq, void *dev_id);
static irqreturn_t mace_dma_intr(int irq, void *dev_id);
static void mace_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void __mace_set_address(struct net_device *dev, const void *addr);

/*
 * Load a receive DMA channel with a base address and ring length
 */

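/* 'set' selects the PSC register set to load: PSC_SET0 (0x00) or PSC_SET1 (0x10) */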
static void mace_load_rxdma_base(struct net_device *dev, int set)
{
	struct mace_data *mp = netdev_priv(dev);

	psc_write_word(PSC_ENETRD_CMD + set, 0x0100);
	psc_write_long(PSC_ENETRD_ADDR + set, (u32) mp->rx_ring_phys);
	psc_write_long(PSC_ENETRD_LEN + set, N_RX_RING);
	psc_write_word(PSC_ENETRD_CMD + set, 0x9800);
	mp->rx_tail = 0;
}

/*
 * Reset the receive DMA subsystem
 */

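/*
 * Reception is held off (ENRCV cleared) while both PSC register sets are
 * reloaded with the base of the receive ring, then restored.
 */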
static void mace_rxdma_reset(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mace = mp->mace;
	u8 maccc = mace->maccc;

	mace->maccc = maccc & ~ENRCV;

	psc_write_word(PSC_ENETRD_CTL, 0x8800);
	mace_load_rxdma_base(dev, 0x00);
	psc_write_word(PSC_ENETRD_CTL, 0x0400);

	psc_write_word(PSC_ENETRD_CTL, 0x8800);
	mace_load_rxdma_base(dev, 0x10);
	psc_write_word(PSC_ENETRD_CTL, 0x0400);

	mace->maccc = maccc;
	mp->rx_slot = 0;

	psc_write_word(PSC_ENETRD_CMD + PSC_SET0, 0x9800);
	psc_write_word(PSC_ENETRD_CMD + PSC_SET1, 0x9800);
}

/*
 * Reset the transmit DMA subsystem
 */

static void mace_txdma_reset(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mace = mp->mace;
	u8 maccc;

	psc_write_word(PSC_ENETWR_CTL, 0x8800);

	maccc = mace->maccc;
	mace->maccc = maccc & ~ENXMT;

	mp->tx_slot = mp->tx_sloti = 0;
	mp->tx_count = N_TX_RING;

	psc_write_word(PSC_ENETWR_CTL, 0x0400);
	mace->maccc = maccc;
}

/*
 * Disable DMA
 */

static void mace_dma_off(struct net_device *dev)
{
	psc_write_word(PSC_ENETRD_CTL, 0x8800);
	psc_write_word(PSC_ENETRD_CTL, 0x1000);
	psc_write_word(PSC_ENETRD_CMD + PSC_SET0, 0x1100);
	psc_write_word(PSC_ENETRD_CMD + PSC_SET1, 0x1100);

	psc_write_word(PSC_ENETWR_CTL, 0x8800);
	psc_write_word(PSC_ENETWR_CTL, 0x1000);
	psc_write_word(PSC_ENETWR_CMD + PSC_SET0, 0x1100);
	psc_write_word(PSC_ENETWR_CMD + PSC_SET1, 0x1100);
}

static const struct net_device_ops mace_netdev_ops = {
	.ndo_open		= mace_open,
	.ndo_stop		= mace_close,
	.ndo_start_xmit		= mace_xmit_start,
	.ndo_tx_timeout		= mace_tx_timeout,
	.ndo_set_rx_mode	= mace_set_multicast,
	.ndo_set_mac_address	= mace_set_address,
	.ndo_validate_addr	= eth_validate_addr,
};

/*
 * Not really much of a probe. The hardware table tells us if this
 * model of Macintrash has a MACE (AV macintoshes)
 */

static int mace_probe(struct platform_device *pdev)
{
	int j;
	struct mace_data *mp;
	unsigned char *addr;
	struct net_device *dev;
	unsigned char checksum = 0;
	u8 macaddr[ETH_ALEN];
	int err;

	dev = alloc_etherdev(PRIV_BYTES);
	if (!dev)
		return -ENOMEM;

	mp = netdev_priv(dev);

	mp->device = &pdev->dev;
	platform_set_drvdata(pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	dev->base_addr = (u32)MACE_BASE;
	mp->mace = MACE_BASE;

	dev->irq = IRQ_MAC_MACE;
	mp->dma_intr = IRQ_MAC_MACE_DMA;

	mp->chipid = mp->mace->chipid_hi << 8 | mp->mace->chipid_lo;

	/*
	 * The PROM contains 8 bytes which total 0xFF when XOR'd
	 * together. Due to the usual peculiar Apple brain damage
	 * the bytes are stored at 16-byte intervals and the bits
	 * within each byte are reversed.
	 */

	addr = MACE_PROM;

	for (j = 0; j < 6; ++j) {
		u8 v = bitrev8(addr[j<<4]);
		checksum ^= v;
		macaddr[j] = v;
	}
	eth_hw_addr_set(dev, macaddr);
	for (; j < 8; ++j) {
		checksum ^= bitrev8(addr[j<<4]);
	}

	if (checksum != 0xFF) {
		free_netdev(dev);
		return -ENODEV;
	}

	dev->netdev_ops		= &mace_netdev_ops;
	dev->watchdog_timeo	= TX_TIMEOUT;

	pr_info("Onboard MACE, hardware address %pM, chip revision 0x%04X\n",
		dev->dev_addr, mp->chipid);

	err = register_netdev(dev);
	if (!err)
		return 0;

	free_netdev(dev);
	return err;
}

/*
 * Reset the chip.
 */

static void mace_reset(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	int i;

	/* soft-reset the chip */
	i = 200;
	while (--i) {
		mb->biucc = SWRST;
		if (mb->biucc & SWRST) {
			udelay(10);
			continue;
		}
		break;
	}
	if (!i) {
		printk(KERN_ERR "macmace: cannot reset chip!\n");
		return;
	}

	mb->maccc = 0;	/* turn off tx, rx */
	mb->imr = 0xFF;	/* disable all intrs for now */
	i = mb->ir;

	mb->biucc = XMTSP_64;
	mb->utr = RTRD;
	mb->fifocc = XMTFW_8 | RCVFW_64 | XMTFWU | RCVFWU;

	mb->xmtfc = AUTO_PAD_XMIT; /* auto-pad short frames */
	mb->rcvfc = 0;

	/* load up the hardware address */
	__mace_set_address(dev, dev->dev_addr);

	/* clear the multicast filter */
	if (mp->chipid == BROKEN_ADDRCHG_REV)
		mb->iac = LOGADDR;
	else {
		mb->iac = ADDRCHG | LOGADDR;
		while ((mb->iac & ADDRCHG) != 0)
			;
	}
	for (i = 0; i < 8; ++i)
		mb->ladrf = 0;

	/* done changing address */
	if (mp->chipid != BROKEN_ADDRCHG_REV)
		mb->iac = 0;

	mb->plscc = PORTSEL_AUI;
}

/*
 * Load the address on a mace controller.
 */

static void __mace_set_address(struct net_device *dev, const void *addr)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	const unsigned char *p = addr;
	u8 macaddr[ETH_ALEN];
	int i;

	/* load up the hardware address */
	if (mp->chipid == BROKEN_ADDRCHG_REV)
		mb->iac = PHYADDR;
	else {
		mb->iac = ADDRCHG | PHYADDR;
		while ((mb->iac & ADDRCHG) != 0)
			;
	}
	for (i = 0; i < 6; ++i)
		mb->padr = macaddr[i] = p[i];
	eth_hw_addr_set(dev, macaddr);
	if (mp->chipid != BROKEN_ADDRCHG_REV)
		mb->iac = 0;
}

static int mace_set_address(struct net_device *dev, void *addr)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	unsigned long flags;
	u8 maccc;

	local_irq_save(flags);

	maccc = mb->maccc;

	__mace_set_address(dev, addr);

	mb->maccc = maccc;

	local_irq_restore(flags);

	return 0;
}

/*
 * Open the Macintosh MACE. Most of this is playing with the DMA
 * engine. The ethernet chip is quite friendly.
 */

static int mace_open(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;

	/* reset the chip */
	mace_reset(dev);

	if (request_irq(dev->irq, mace_interrupt, 0, dev->name, dev)) {
		printk(KERN_ERR "%s: can't get irq %d\n", dev->name, dev->irq);
		return -EAGAIN;
	}
	if (request_irq(mp->dma_intr, mace_dma_intr, 0, dev->name, dev)) {
		printk(KERN_ERR "%s: can't get irq %d\n", dev->name, mp->dma_intr);
		free_irq(dev->irq, dev);
		return -EAGAIN;
	}

	/* Allocate the DMA ring buffers */

	mp->tx_ring = dma_alloc_coherent(mp->device,
					 N_TX_RING * MACE_BUFF_SIZE,
					 &mp->tx_ring_phys, GFP_KERNEL);
	if (mp->tx_ring == NULL)
		goto out1;

	mp->rx_ring = dma_alloc_coherent(mp->device,
					 N_RX_RING * MACE_BUFF_SIZE,
					 &mp->rx_ring_phys, GFP_KERNEL);
	if (mp->rx_ring == NULL)
		goto out2;

	mace_dma_off(dev);

	/* Not sure what these do */

	psc_write_word(PSC_ENETWR_CTL, 0x9000);
	psc_write_word(PSC_ENETRD_CTL, 0x9000);
	psc_write_word(PSC_ENETWR_CTL, 0x0400);
	psc_write_word(PSC_ENETRD_CTL, 0x0400);

	mace_rxdma_reset(dev);
	mace_txdma_reset(dev);

	/* turn it on! */
	mb->maccc = ENXMT | ENRCV;
	/* enable all interrupts except receive interrupts */
	mb->imr = RCVINT;
	return 0;

out2:
	dma_free_coherent(mp->device, N_TX_RING * MACE_BUFF_SIZE,
	                  mp->tx_ring, mp->tx_ring_phys);
out1:
	free_irq(dev->irq, dev);
	free_irq(mp->dma_intr, dev);
	return -ENOMEM;
}

/*
 * Shut down the mace and its interrupt channel
 */

static int mace_close(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;

	mb->maccc = 0;		/* disable rx and tx	 */
	mb->imr = 0xFF;		/* disable all irqs	 */
	mace_dma_off(dev);	/* disable rx and tx dma */

	return 0;
}

/*
 * Transmit a frame
 */

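/*
 * There is only one transmit buffer (N_TX_RING == 1), so the queue is
 * stopped for every packet and woken again from mace_interrupt() once the
 * DMA completion handler has marked the buffer free. tx_slot ping-pongs
 * between the two PSC register sets.
 */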
static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	unsigned long flags;

	/* Stop the queue since there's only the one buffer */

	local_irq_save(flags);
	netif_stop_queue(dev);
	if (!mp->tx_count) {
		printk(KERN_ERR "macmace: tx queue running but no free buffers.\n");
		local_irq_restore(flags);
		return NETDEV_TX_BUSY;
	}
	mp->tx_count--;
	local_irq_restore(flags);

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	/* We need to copy into our xmit buffer to take care of alignment and caching issues */
	skb_copy_from_linear_data(skb, mp->tx_ring, skb->len);

	/* load the Tx DMA and fire it off */

	psc_write_long(PSC_ENETWR_ADDR + mp->tx_slot, (u32)  mp->tx_ring_phys);
	psc_write_long(PSC_ENETWR_LEN + mp->tx_slot, skb->len);
	psc_write_word(PSC_ENETWR_CMD + mp->tx_slot, 0x9800);

	mp->tx_slot ^= 0x10;

	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}

static void mace_set_multicast(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	int i;
	u32 crc;
	u8 maccc;
	unsigned long flags;

	local_irq_save(flags);
	maccc = mb->maccc;
	mb->maccc &= ~PROM;

	if (dev->flags & IFF_PROMISC) {
		mb->maccc |= PROM;
	} else {
		unsigned char multicast_filter[8];
		struct netdev_hw_addr *ha;

		if (dev->flags & IFF_ALLMULTI) {
			for (i = 0; i < 8; i++) {
				multicast_filter[i] = 0xFF;
			}
		} else {
			for (i = 0; i < 8; i++)
				multicast_filter[i] = 0;
			netdev_for_each_mc_addr(ha, dev) {
				crc = ether_crc_le(6, ha->addr);
				/* bit number in multicast_filter */
				i = crc >> 26;
				multicast_filter[i >> 3] |= 1 << (i & 7);
			}
		}

		if (mp->chipid == BROKEN_ADDRCHG_REV)
			mb->iac = LOGADDR;
		else {
			mb->iac = ADDRCHG | LOGADDR;
			while ((mb->iac & ADDRCHG) != 0)
				;
		}
		for (i = 0; i < 8; ++i)
			mb->ladrf = multicast_filter[i];
		if (mp->chipid != BROKEN_ADDRCHG_REV)
			mb->iac = 0;
	}

	mb->maccc = maccc;
	local_irq_restore(flags);
}

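/*
 * Fold the error events reported in the MACE interrupt register into the
 * interface statistics; the missed-packet and runt counters clear on read.
 */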
static void mace_handle_misc_intrs(struct net_device *dev, int intr)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	static int mace_babbles, mace_jabbers;

	if (intr & MPCO)
		dev->stats.rx_missed_errors += 256;
	dev->stats.rx_missed_errors += mb->mpc;   /* reading clears it */
	if (intr & RNTPCO)
		dev->stats.rx_length_errors += 256;
	dev->stats.rx_length_errors += mb->rntpc; /* reading clears it */
	if (intr & CERR)
		++dev->stats.tx_heartbeat_errors;
	if (intr & BABBLE)
		if (mace_babbles++ < 4)
			printk(KERN_DEBUG "macmace: babbling transmitter\n");
	if (intr & JABBER)
		if (mace_jabbers++ < 4)
			printk(KERN_DEBUG "macmace: jabbering transceiver\n");
}

static irqreturn_t mace_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	int intr, fs;
	unsigned long flags;

	/* don't want the dma interrupt handler to fire */
	local_irq_save(flags);

	intr = mb->ir; /* read interrupt register */
	mace_handle_misc_intrs(dev, intr);

	if (intr & XMTINT) {
		fs = mb->xmtfs;
		if ((fs & XMTSV) == 0) {
			printk(KERN_ERR "macmace: xmtfs not valid! (fs=%x)\n", fs);
			mace_reset(dev);
			/*
			 * XXX mace likes to hang the machine after a xmtfs error.
			 * This is hard to reproduce, resetting *may* help
			 */
		}
		/* dma should have finished */
		if (!mp->tx_count) {
			printk(KERN_DEBUG "macmace: tx ring ran out? (fs=%x)\n", fs);
		}
		/* Update stats */
		if (fs & (UFLO|LCOL|LCAR|RTRY)) {
			++dev->stats.tx_errors;
			if (fs & LCAR)
				++dev->stats.tx_carrier_errors;
			else if (fs & (UFLO|LCOL|RTRY)) {
				++dev->stats.tx_aborted_errors;
				if (mb->xmtfs & UFLO) {
					dev->stats.tx_fifo_errors++;
					mace_txdma_reset(dev);
				}
			}
		}
	}

	if (mp->tx_count)
		netif_wake_queue(dev);

	local_irq_restore(flags);

	return IRQ_HANDLED;
}

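/*
 * Transmit watchdog: reset the chip and both DMA channels, then restart
 * reception and wake the transmit queue.
 */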
static void mace_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	unsigned long flags;

	local_irq_save(flags);

	/* turn off both tx and rx and reset the chip */
	mb->maccc = 0;
	printk(KERN_ERR "macmace: transmit timeout - resetting\n");
	mace_txdma_reset(dev);
	mace_reset(dev);

	/* restart rx dma */
	mace_rxdma_reset(dev);

	mp->tx_count = N_TX_RING;
	netif_wake_queue(dev);

	/* turn it on! */
	mb->maccc = ENXMT | ENRCV;
	/* enable all interrupts except receive interrupts */
	mb->imr = RCVINT;

	local_irq_restore(flags);
}

/*
 * Handle a newly arrived frame
 */

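/* The received length is 12 bits: rcvcnt holds the low 8, the low nibble of rcvsts the upper 4 */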
static void mace_dma_rx_frame(struct net_device *dev, struct mace_frame *mf)
{
	struct sk_buff *skb;
	unsigned int frame_status = mf->rcvsts;

	if (frame_status & (RS_OFLO | RS_CLSN | RS_FRAMERR | RS_FCSERR)) {
		dev->stats.rx_errors++;
		if (frame_status & RS_OFLO)
			dev->stats.rx_fifo_errors++;
		if (frame_status & RS_CLSN)
			dev->stats.collisions++;
		if (frame_status & RS_FRAMERR)
			dev->stats.rx_frame_errors++;
		if (frame_status & RS_FCSERR)
			dev->stats.rx_crc_errors++;
	} else {
		unsigned int frame_length = mf->rcvcnt + ((frame_status & 0x0F) << 8);

		skb = netdev_alloc_skb(dev, frame_length + 2);
		if (!skb) {
			dev->stats.rx_dropped++;
			return;
		}
		skb_reserve(skb, 2);
		skb_put_data(skb, mf->data, frame_length);

		skb->protocol = eth_type_trans(skb, dev);
		netif_rx(skb);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += frame_length;
	}
}

/*
 * The PSC has passed us a DMA interrupt event.
 */

static irqreturn_t mace_dma_intr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct mace_data *mp = netdev_priv(dev);
	int left, head;
	u16 status;
	u32 baka;

	/* Not sure what this does */

	while ((baka = psc_read_long(PSC_MYSTERY)) != psc_read_long(PSC_MYSTERY));
	if (!(baka & 0x60000000)) return IRQ_NONE;

	/*
	 * Process the read queue
	 */

	status = psc_read_word(PSC_ENETRD_CTL);

	if (status & 0x2000) {
		mace_rxdma_reset(dev);
	} else if (status & 0x0100) {
		psc_write_word(PSC_ENETRD_CMD + mp->rx_slot, 0x1100);

		left = psc_read_long(PSC_ENETRD_LEN + mp->rx_slot);
		head = N_RX_RING - left;

		/* Loop through the ring buffer and process new packets */

		while (mp->rx_tail < head) {
			mace_dma_rx_frame(dev, (struct mace_frame*) (mp->rx_ring
				+ (mp->rx_tail * MACE_BUFF_SIZE)));
			mp->rx_tail++;
		}

		/* If we're out of buffers in this ring then switch to */
		/* the other set, otherwise just reactivate this one.  */

		if (!left) {
			mace_load_rxdma_base(dev, mp->rx_slot);
			mp->rx_slot ^= 0x10;
		} else {
			psc_write_word(PSC_ENETRD_CMD + mp->rx_slot, 0x9800);
		}
	}

	/*
	 * Process the write queue
	 */

	status = psc_read_word(PSC_ENETWR_CTL);

	if (status & 0x2000) {
		mace_txdma_reset(dev);
	} else if (status & 0x0100) {
		psc_write_word(PSC_ENETWR_CMD + mp->tx_sloti, 0x0100);
		mp->tx_sloti ^= 0x10;
		mp->tx_count++;
	}
	return IRQ_HANDLED;
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Macintosh MACE ethernet driver");
MODULE_ALIAS("platform:macmace");

static int mac_mace_device_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct mace_data *mp = netdev_priv(dev);

	unregister_netdev(dev);

	free_irq(dev->irq, dev);
	free_irq(IRQ_MAC_MACE_DMA, dev);

	dma_free_coherent(mp->device, N_RX_RING * MACE_BUFF_SIZE,
	                  mp->rx_ring, mp->rx_ring_phys);
	dma_free_coherent(mp->device, N_TX_RING * MACE_BUFF_SIZE,
	                  mp->tx_ring, mp->tx_ring_phys);

	free_netdev(dev);

	return 0;
}

static struct platform_driver mac_mace_driver = {
	.probe  = mace_probe,
	.remove = mac_mace_device_remove,
	.driver	= {
		.name	= mac_mace_string,
	},
};

module_platform_driver(mac_mace_driver);