 	return 0;
 }

-@@ -457,16 +651,16 @@ ltq_etop_tx(struct sk_buff *skb, struct
+@@ -457,15 +651,16 @@ ltq_etop_tx(struct sk_buff *skb, struct
 	int queue = skb_get_queue_mapping(skb);
 	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue);
 	struct ltq_etop_priv *priv = netdev_priv(dev);
[...]
 	len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;

 -	if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) {
--		dev_kfree_skb_any(skb);
 +	if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) ||
 +	    priv->txch.skb[priv->txch.dma.desc]) {
 		netdev_err(dev, "tx ring full\n");
 		netif_tx_stop_queue(txq);
 		return NETDEV_TX_BUSY;
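
The only substantive change in this refresh sits in the hunk above: upstream removed the dev_kfree_skb_any(skb) call from the busy path (freeing the skb and then returning NETDEV_TX_BUSY would let the stack requeue an already-freed buffer), so the patch no longer needs to delete that line. Dropping one "-" line shrinks the hunk's old-file span from 16 lines to 15, and every following hunk header merely moves its old-file start up by one (474 to 473, 484 to 483, and so on); that renumbering is all the rest of this commit does. As a rough user-space model of the ring-full test the patch keeps (the driver's bit names, but the ring layout and bit positions are invented for illustration):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define LTQ_DMA_OWN (1u << 31)	/* descriptor owned by the DMA engine (position assumed) */
#define LTQ_DMA_C   (1u << 30)	/* transfer complete, not yet reclaimed (position assumed) */

/* Invented stand-in for the driver's ring; only the fields the busy
 * test touches are modelled. */
struct tx_ring {
	uint32_t ctl[64];	/* per-descriptor control words */
	void *skb[64];		/* software shadow: packet still in flight? */
	unsigned int next;	/* descriptor the driver would use next */
};

/* A slot is unusable while hardware still owns it, while a finished
 * descriptor awaits cleanup, or while its shadow entry holds an skb. */
static bool tx_slot_busy(const struct tx_ring *r)
{
	return (r->ctl[r->next] & (LTQ_DMA_OWN | LTQ_DMA_C)) ||
	       r->skb[r->next] != NULL;
}

Returning NETDEV_TX_BUSY with the queue stopped makes the networking core hold on to the skb and retry once the completion path wakes the queue, which is why no freeing belongs there.
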
-@@ -474,7 +668,7 @@ ltq_etop_tx(struct sk_buff *skb, struct
+@@ -473,7 +668,7 @@ ltq_etop_tx(struct sk_buff *skb, struct

 	/* dma needs to start on a 16 byte aligned address */
 	byte_offset = CPHYSADDR(skb->data) % 16;
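
The comment preserved above records a hardware constraint: the DMA engine must fetch from a 16-byte aligned address, so rather than copying the packet into an aligned buffer the driver keeps skb->data where it is, notes how far it sits past the previous 16-byte boundary, and programs that offset into the descriptor (the LTQ_DMA_TX_OFFSET() seen in the next hunk). A stand-alone illustration of the arithmetic, with a plain cast standing in for the MIPS CPHYSADDR() macro:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	char buf[64];
	char *data = buf + 5;			/* pretend this is skb->data */
	uintptr_t phys = (uintptr_t)data;	/* stand-in for CPHYSADDR(data) */
	unsigned int byte_offset = phys % 16;	/* the same math as the driver */

	/* The descriptor points at the aligned address and carries
	 * byte_offset so the hardware skips the leading padding. */
	printf("aligned start %#lx, payload offset %u\n",
	       (unsigned long)(phys - byte_offset), byte_offset);
	return 0;
}
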
[...]

 	netif_trans_update(dev);

-@@ -484,11 +678,11 @@ ltq_etop_tx(struct sk_buff *skb, struct
+@@ -483,11 +678,11 @@ ltq_etop_tx(struct sk_buff *skb, struct
 	wmb();
 	desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP |
 		LTQ_DMA_TX_OFFSET(byte_offset) | (len & LTQ_DMA_SIZE_MASK);
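
The wmb() kept as context here is what makes the descriptor hand-off safe: every other descriptor field must be visible to the device before the control word sets LTQ_DMA_OWN, or the engine could fetch a half-written descriptor. A sketch of that publish pattern, with a release fence standing in for the kernel's wmb() and an invented two-field descriptor:

#include <stdint.h>

#define OWN (1u << 31)	/* ownership bit, mirroring LTQ_DMA_OWN */

struct desc {
	uint32_t addr;	/* DMA address of the buffer */
	uint32_t ctl;	/* control word; setting OWN hands it to hardware */
};

/* Fill every field first, fence, then flip OWN last. The release
 * fence plays the role of wmb() in the hunk above. */
static void publish_desc(volatile struct desc *d, uint32_t addr, uint32_t len)
{
	d->addr = addr;
	__atomic_thread_fence(__ATOMIC_RELEASE);
	d->ctl = OWN | (len & 0xffffu);
}
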
[...]
 		netif_tx_stop_queue(txq);

 	return NETDEV_TX_OK;
-@@ -499,11 +693,14 @@ ltq_etop_change_mtu(struct net_device *d
+@@ -498,11 +693,14 @@ ltq_etop_change_mtu(struct net_device *d
 {
 	struct ltq_etop_priv *priv = netdev_priv(dev);
 	unsigned long flags;
[...]
 	spin_unlock_irqrestore(&priv->lock, flags);

 	return 0;
-@@ -556,6 +753,9 @@ ltq_etop_init(struct net_device *dev)
+@@ -555,6 +753,9 @@ ltq_etop_init(struct net_device *dev)
 	if (err)
 		goto err_hw;
 	ltq_etop_change_mtu(dev, 1500);
[...]

 	memcpy(&mac, &priv->pldata->mac, sizeof(struct sockaddr));
 	if (!is_valid_ether_addr(mac.sa_data)) {
-@@ -573,9 +773,10 @@ ltq_etop_init(struct net_device *dev)
+@@ -572,9 +773,10 @@ ltq_etop_init(struct net_device *dev)
 		dev->addr_assign_type = NET_ADDR_RANDOM;

 	ltq_etop_set_multicast_list(dev);
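
The context around this hunk shows the MAC fallback that the refresh only renumbers: the address supplied via platform data is used when it passes is_valid_ether_addr(), and otherwise the driver generates a random one and marks it NET_ADDR_RANDOM so user space can tell it is not a burned-in address. A simplified reimplementation of the validity test (the real helper comes from <linux/etherdevice.h>):

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* Mirrors the checks behind is_valid_ether_addr(): reject the all-zero
 * address and anything with the multicast bit set. */
static bool mac_is_valid(const uint8_t addr[6])
{
	static const uint8_t zero[6];

	if (addr[0] & 0x01)
		return false;	/* multicast (and broadcast) addresses */
	if (memcmp(addr, zero, sizeof(zero)) == 0)
		return false;	/* all zeroes: no address set */
	return true;
}
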
[...]
 	return 0;

 err_netdev:
-@@ -595,6 +796,9 @@ ltq_etop_tx_timeout(struct net_device *d
+@@ -594,6 +796,9 @@ ltq_etop_tx_timeout(struct net_device *d
 	err = ltq_etop_hw_init(dev);
 	if (err)
 		goto err_hw;
[...]
 	netif_trans_update(dev);
 	netif_wake_queue(dev);
 	return;
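
The tail of ltq_etop_tx_timeout() seen here follows the usual watchdog-recovery shape: re-initialise the hardware, refresh the transmit timestamp so the watchdog does not refire immediately, then wake the queue. Since the collapsed lines are not visible in this view, here is only a skeleton of that control flow, with function pointers standing in for the driver's real helpers:

/* Hypothetical skeleton of a tx-timeout recovery path. */
struct nic {
	int  (*hw_init)(struct nic *nic);	/* rebuild DMA and MAC state */
	void (*trans_update)(struct nic *nic);	/* reset watchdog timestamp */
	void (*wake_queue)(struct nic *nic);	/* let the stack transmit again */
};

static void tx_timeout_recover(struct nic *nic)
{
	if (nic->hw_init(nic))
		return;			/* keep the queue stopped on failure */
	nic->trans_update(nic);		/* avoid an immediate re-trigger */
	nic->wake_queue(nic);
}
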
-@@ -618,14 +822,19 @@ static const struct net_device_ops ltq_e
+@@ -617,14 +822,19 @@ static const struct net_device_ops ltq_e
 	.ndo_tx_timeout = ltq_etop_tx_timeout,
 };

[...]

 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!res) {
-@@ -651,31 +860,64 @@ ltq_etop_probe(struct platform_device *p
+@@ -650,31 +860,64 @@ ltq_etop_probe(struct platform_device *p
 		goto err_out;
 	}

[...]

 	err = register_netdev(dev);
 	if (err)
-@@ -704,31 +946,22 @@ ltq_etop_remove(struct platform_device *
+@@ -703,31 +946,22 @@ ltq_etop_remove(struct platform_device *
 	return 0;
 }
