@@ -359,7 +359,7 @@ static int he_init_one(struct pci_dev *pci_dev,
 
 	if (pci_enable_device(pci_dev))
 		return -EIO;
-	if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32)) != 0) {
+	if (dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32)) != 0) {
 		printk(KERN_WARNING "he: no suitable dma available\n");
 		err = -EIO;
 		goto init_one_failure;
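
dma_set_mask_and_coherent() sets both the streaming and the coherent DMA mask in one call, whereas the old pci_set_dma_mask() covered only the streaming side. A minimal probe-time sketch of the same pattern (the function name example_probe is hypothetical, not part of this patch):

#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	/* one call instead of pci_set_dma_mask() + pci_set_consistent_dma_mask() */
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (err) {
		dev_warn(&pdev->dev, "no suitable DMA available\n");
		pci_disable_device(pdev);
		return err;
	}

	return 0;
}
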
@@ -533,9 +533,9 @@ static void he_init_tx_lbfp(struct he_dev *he_dev)
 
 static int he_init_tpdrq(struct he_dev *he_dev)
 {
-	he_dev->tpdrq_base = pci_zalloc_consistent(he_dev->pci_dev,
-						   CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
-						   &he_dev->tpdrq_phys);
+	he_dev->tpdrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
+						 CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
+						 &he_dev->tpdrq_phys, GFP_KERNEL);
 	if (he_dev->tpdrq_base == NULL) {
 		hprintk("failed to alloc tpdrq\n");
 		return -ENOMEM;
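
pci_zalloc_consistent() was only a thin wrapper over the generic API, so this hunk is a like-for-like conversion; the shape below mirrors the compat helper in include/asm-generic/pci-dma-compat.h. The one real gain is the explicit gfp_t argument: the wrapper had to hard-code GFP_ATOMIC, while a probe-time path such as he_init_tpdrq() can now pass GFP_KERNEL.

static inline void *
pci_zalloc_consistent(struct pci_dev *hwdev, size_t size,
		      dma_addr_t *dma_handle)
{
	/* the compat layer cannot know whether the caller may sleep */
	return dma_zalloc_coherent(hwdev == NULL ? NULL : &hwdev->dev,
				   size, dma_handle, GFP_ATOMIC);
}
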
@@ -796,16 +796,16 @@ static int he_init_group(struct he_dev *he_dev, int group)
 	}
 
 	/* large buffer pool */
-	he_dev->rbpl_pool = pci_pool_create("rbpl", he_dev->pci_dev,
+	he_dev->rbpl_pool = dma_pool_create("rbpl", &he_dev->pci_dev->dev,
 					    CONFIG_RBPL_BUFSIZE, 64, 0);
 	if (he_dev->rbpl_pool == NULL) {
 		hprintk("unable to create rbpl pool\n");
 		goto out_free_rbpl_virt;
 	}
 
-	he_dev->rbpl_base = pci_zalloc_consistent(he_dev->pci_dev,
-						  CONFIG_RBPL_SIZE * sizeof(struct he_rbp),
-						  &he_dev->rbpl_phys);
+	he_dev->rbpl_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
+						CONFIG_RBPL_SIZE * sizeof(struct he_rbp),
+						&he_dev->rbpl_phys, GFP_KERNEL);
 	if (he_dev->rbpl_base == NULL) {
 		hprintk("failed to alloc rbpl_base\n");
 		goto out_destroy_rbpl_pool;
@@ -815,7 +815,7 @@ static int he_init_group(struct he_dev *he_dev, int group)
 
 	for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
 
-		heb = pci_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL|GFP_DMA, &mapping);
+		heb = dma_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL, &mapping);
 		if (!heb)
 			goto out_free_rbpl;
 		heb->mapping = mapping;
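
Note that GFP_DMA is dropped along with pci_pool_alloc(): a dma_pool is created against a struct device, so the allocator already honors that device's DMA mask, and forcing ZONE_DMA by hand is unnecessary. A self-contained round-trip sketch (pool name, block size, and alignment are illustrative only):

#include <linux/pci.h>
#include <linux/dmapool.h>

static int example_pool_roundtrip(struct pci_dev *pdev)
{
	struct dma_pool *pool;
	dma_addr_t dma;
	void *buf;

	/* 1024-byte blocks, 64-byte alignment, no boundary restriction */
	pool = dma_pool_create("example", &pdev->dev, 1024, 64, 0);
	if (!pool)
		return -ENOMEM;

	buf = dma_pool_alloc(pool, GFP_KERNEL, &dma);	/* no GFP_DMA needed */
	if (!buf) {
		dma_pool_destroy(pool);
		return -ENOMEM;
	}

	dma_pool_free(pool, buf, dma);
	dma_pool_destroy(pool);
	return 0;
}
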
@@ -842,9 +842,9 @@ static int he_init_group(struct he_dev *he_dev, int group)
 
 	/* rx buffer ready queue */
 
-	he_dev->rbrq_base = pci_zalloc_consistent(he_dev->pci_dev,
-						  CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
-						  &he_dev->rbrq_phys);
+	he_dev->rbrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
+						CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
+						&he_dev->rbrq_phys, GFP_KERNEL);
 	if (he_dev->rbrq_base == NULL) {
 		hprintk("failed to allocate rbrq\n");
 		goto out_free_rbpl;
@@ -866,9 +866,9 @@ static int he_init_group(struct he_dev *he_dev, int group)
 
 	/* tx buffer ready queue */
 
-	he_dev->tbrq_base = pci_zalloc_consistent(he_dev->pci_dev,
-						  CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
-						  &he_dev->tbrq_phys);
+	he_dev->tbrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
+						CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
+						&he_dev->tbrq_phys, GFP_KERNEL);
 	if (he_dev->tbrq_base == NULL) {
 		hprintk("failed to allocate tbrq\n");
 		goto out_free_rbpq_base;
@@ -884,18 +884,18 @@ static int he_init_group(struct he_dev *he_dev, int group)
 	return 0;
 
 out_free_rbpq_base:
-	pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE *
-			sizeof(struct he_rbrq), he_dev->rbrq_base,
-			he_dev->rbrq_phys);
+	dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBRQ_SIZE *
+			  sizeof(struct he_rbrq), he_dev->rbrq_base,
+			  he_dev->rbrq_phys);
out_free_rbpl:
 	list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
-		pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
+		dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
 
-	pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE *
-			sizeof(struct he_rbp), he_dev->rbpl_base,
-			he_dev->rbpl_phys);
+	dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBPL_SIZE *
+			  sizeof(struct he_rbp), he_dev->rbpl_base,
+			  he_dev->rbpl_phys);
 out_destroy_rbpl_pool:
-	pci_pool_destroy(he_dev->rbpl_pool);
+	dma_pool_destroy(he_dev->rbpl_pool);
 out_free_rbpl_virt:
 	kfree(he_dev->rbpl_virt);
 out_free_rbpl_table:
@@ -911,8 +911,11 @@ static int he_init_irq(struct he_dev *he_dev)
 	/* 2.9.3.5  tail offset for each interrupt queue is located after the
 		    end of the interrupt queue */
 
-	he_dev->irq_base = pci_alloc_consistent(he_dev->pci_dev,
-			(CONFIG_IRQ_SIZE+1) * sizeof(struct he_irq), &he_dev->irq_phys);
+	he_dev->irq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
+					       (CONFIG_IRQ_SIZE + 1)
+					       * sizeof(struct he_irq),
+					       &he_dev->irq_phys,
+					       GFP_KERNEL);
 	if (he_dev->irq_base == NULL) {
 		hprintk("failed to allocate irq\n");
 		return -ENOMEM;
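
This hunk is one place where the conversion is not purely mechanical: pci_alloc_consistent() did not zero the buffer, while dma_zalloc_coherent() does, so the irq queue now starts out cleared. The zeroing variant is just dma_alloc_coherent() with __GFP_ZERO, roughly as sketched below (this mirrors the include/linux/dma-mapping.h helper of this era):

static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t flag)
{
	return dma_alloc_coherent(dev, size, dma_handle,
				  flag | __GFP_ZERO);
}
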
@@ -1419,10 +1422,10 @@ static int he_start(struct atm_dev *dev)
 
 	he_init_tpdrq(he_dev);
 
-	he_dev->tpd_pool = pci_pool_create("tpd", he_dev->pci_dev,
-		sizeof(struct he_tpd), TPD_ALIGNMENT, 0);
+	he_dev->tpd_pool = dma_pool_create("tpd", &he_dev->pci_dev->dev,
+					   sizeof(struct he_tpd), TPD_ALIGNMENT, 0);
 	if (he_dev->tpd_pool == NULL) {
-		hprintk("unable to create tpd pci_pool\n");
+		hprintk("unable to create tpd dma_pool\n");
 		return -ENOMEM;
 	}
 
@@ -1459,9 +1462,9 @@ static int he_start(struct atm_dev *dev)
 
 	/* host status page */
 
-	he_dev->hsp = pci_zalloc_consistent(he_dev->pci_dev,
-					    sizeof(struct he_hsp),
-					    &he_dev->hsp_phys);
+	he_dev->hsp = dma_zalloc_coherent(&he_dev->pci_dev->dev,
+					  sizeof(struct he_hsp),
+					  &he_dev->hsp_phys, GFP_KERNEL);
 	if (he_dev->hsp == NULL) {
 		hprintk("failed to allocate host status page\n");
 		return -ENOMEM;
@@ -1558,41 +1561,41 @@ he_stop(struct he_dev *he_dev)
 		free_irq(he_dev->irq, he_dev);
 
 	if (he_dev->irq_base)
-		pci_free_consistent(he_dev->pci_dev, (CONFIG_IRQ_SIZE+1)
-			* sizeof(struct he_irq), he_dev->irq_base, he_dev->irq_phys);
+		dma_free_coherent(&he_dev->pci_dev->dev, (CONFIG_IRQ_SIZE + 1)
+				  * sizeof(struct he_irq), he_dev->irq_base, he_dev->irq_phys);
 
 	if (he_dev->hsp)
-		pci_free_consistent(he_dev->pci_dev, sizeof(struct he_hsp),
-			he_dev->hsp, he_dev->hsp_phys);
+		dma_free_coherent(&he_dev->pci_dev->dev, sizeof(struct he_hsp),
+				  he_dev->hsp, he_dev->hsp_phys);
 
 	if (he_dev->rbpl_base) {
 		list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
-			pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
+			dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
 
-		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
-			* sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys);
+		dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBPL_SIZE
+				  * sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys);
 	}
 
 	kfree(he_dev->rbpl_virt);
 	kfree(he_dev->rbpl_table);
 
 	if (he_dev->rbpl_pool)
-		pci_pool_destroy(he_dev->rbpl_pool);
+		dma_pool_destroy(he_dev->rbpl_pool);
 
 	if (he_dev->rbrq_base)
-		pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
-			he_dev->rbrq_base, he_dev->rbrq_phys);
+		dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
+				  he_dev->rbrq_base, he_dev->rbrq_phys);
 
 	if (he_dev->tbrq_base)
-		pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
-			he_dev->tbrq_base, he_dev->tbrq_phys);
+		dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
+				  he_dev->tbrq_base, he_dev->tbrq_phys);
 
 	if (he_dev->tpdrq_base)
-		pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
-			he_dev->tpdrq_base, he_dev->tpdrq_phys);
+		dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
+				  he_dev->tpdrq_base, he_dev->tpdrq_phys);
 
 	if (he_dev->tpd_pool)
-		pci_pool_destroy(he_dev->tpd_pool);
+		dma_pool_destroy(he_dev->tpd_pool);
 
 	if (he_dev->pci_dev) {
 		pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command);
@@ -1610,7 +1613,7 @@ __alloc_tpd(struct he_dev *he_dev)
 	struct he_tpd *tpd;
 	dma_addr_t mapping;
 
-	tpd = pci_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC|GFP_DMA, &mapping);
+	tpd = dma_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC, &mapping);
 	if (tpd == NULL)
 		return NULL;
 
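
Unlike the probe-time allocations above, __alloc_tpd() can run from the device tasklet, so it keeps GFP_ATOMIC; only GFP_DMA is dropped, for the same device-mask reason as the rbpl pool. A sketch of the rule of thumb (the helper name and may_sleep flag are illustrative, not from this driver):

#include <linux/types.h>
#include <linux/dmapool.h>

static void *example_alloc_desc(struct dma_pool *pool, dma_addr_t *dma,
				bool may_sleep)
{
	/* sleeping allocations only where the caller is allowed to sleep */
	return dma_pool_alloc(pool, may_sleep ? GFP_KERNEL : GFP_ATOMIC, dma);
}
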
@@ -1681,7 +1684,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
 			if (!RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
 				clear_bit(i, he_dev->rbpl_table);
 				list_del(&heb->entry);
-				pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
+				dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
 			}
 
 			goto next_rbrq_entry;
@@ -1774,7 +1777,7 @@ return_host_buffers:
 		++pdus_assembled;
 
 		list_for_each_entry_safe(heb, next, &he_vcc->buffers, entry)
-			pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
+			dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
 		INIT_LIST_HEAD(&he_vcc->buffers);
 		he_vcc->pdu_len = 0;
 
@@ -1843,10 +1846,10 @@ he_service_tbrq(struct he_dev *he_dev, int group)
 
 		for (slot = 0; slot < TPD_MAXIOV; ++slot) {
 			if (tpd->iovec[slot].addr)
-				pci_unmap_single(he_dev->pci_dev,
+				dma_unmap_single(&he_dev->pci_dev->dev,
 					tpd->iovec[slot].addr,
 					tpd->iovec[slot].len & TPD_LEN_MASK,
-						PCI_DMA_TODEVICE);
+						DMA_TO_DEVICE);
 			if (tpd->iovec[slot].len & TPD_LST)
 				break;
 
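
For streaming mappings the direction constants change spelling but not meaning: PCI_DMA_TODEVICE corresponds to DMA_TO_DEVICE, PCI_DMA_FROMDEVICE to DMA_FROM_DEVICE. A minimal map/use/unmap sketch with the generic calls (hypothetical helper, with the dma_mapping_error() check that new code should make):

#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int example_tx_map(struct pci_dev *pdev, void *data, size_t len)
{
	dma_addr_t addr;

	addr = dma_map_single(&pdev->dev, data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, addr))
		return -ENOMEM;

	/* ... hand addr/len to the adapter, wait for the TX completion ... */

	dma_unmap_single(&pdev->dev, addr, len, DMA_TO_DEVICE);
	return 0;
}
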
@@ -1861,7 +1864,7 @@ he_service_tbrq(struct he_dev *he_dev, int group)
 
 next_tbrq_entry:
 		if (tpd)
-			pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
+			dma_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
 		he_dev->tbrq_head = (struct he_tbrq *)
 			((unsigned long) he_dev->tbrq_base |
 				TBRQ_MASK(he_dev->tbrq_head + 1));
@@ -1905,7 +1908,7 @@ he_service_rbpl(struct he_dev *he_dev, int group)
 		}
 		he_dev->rbpl_hint = i + 1;
 
-		heb = pci_pool_alloc(he_dev->rbpl_pool, GFP_ATOMIC|GFP_DMA, &mapping);
+		heb = dma_pool_alloc(he_dev->rbpl_pool, GFP_ATOMIC, &mapping);
 		if (!heb)
 			break;
 		heb->mapping = mapping;
@@ -2084,10 +2087,10 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
 			 */
 			for (slot = 0; slot < TPD_MAXIOV; ++slot) {
 				if (tpd->iovec[slot].addr)
-					pci_unmap_single(he_dev->pci_dev,
+					dma_unmap_single(&he_dev->pci_dev->dev,
 						tpd->iovec[slot].addr,
 						tpd->iovec[slot].len & TPD_LEN_MASK,
-							PCI_DMA_TODEVICE);
+							DMA_TO_DEVICE);
 			}
 			if (tpd->skb) {
 				if (tpd->vcc->pop)
@@ -2096,7 +2099,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
 					dev_kfree_skb_any(tpd->skb);
 				atomic_inc(&tpd->vcc->stats->tx_err);
 			}
-			pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
+			dma_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
 			return;
 		}
 	}
@@ -2550,8 +2553,8 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
 	}
 
 #ifdef USE_SCATTERGATHER
-	tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev, skb->data,
-				skb_headlen(skb), PCI_DMA_TODEVICE);
+	tpd->iovec[slot].addr = dma_map_single(&he_dev->pci_dev->dev, skb->data,
+				skb_headlen(skb), DMA_TO_DEVICE);
 	tpd->iovec[slot].len = skb_headlen(skb);
 	++slot;
 
@@ -2579,9 +2582,9 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
 			slot = 0;
 		}
 
-		tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev,
+		tpd->iovec[slot].addr = dma_map_single(&he_dev->pci_dev->dev,
 			(void *) page_address(frag->page) + frag->page_offset,
-				frag->size, PCI_DMA_TODEVICE);
+				frag->size, DMA_TO_DEVICE);
 		tpd->iovec[slot].len = frag->size;
 		++slot;
 
@@ -2589,7 +2592,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
 
 	tpd->iovec[slot - 1].len |= TPD_LST;
 #else
-	tpd->address0 = pci_map_single(he_dev->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
+	tpd->address0 = dma_map_single(&he_dev->pci_dev->dev, skb->data, skb->len, DMA_TO_DEVICE);
 	tpd->length0 = skb->len | TPD_LST;
 #endif
 	tpd->status |= TPD_INT;
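
Taken together, the he.c changes are a one-to-one substitution on &pci_dev->dev: pci_(z)alloc_consistent/pci_free_consistent become dma_(z)alloc_coherent/dma_free_coherent, pci_pool_* becomes dma_pool_*, pci_map/unmap_single become dma_map/unmap_single, and PCI_DMA_TODEVICE becomes DMA_TO_DEVICE. A compact sketch of the coherent-buffer lifetime that the queue setup/teardown pairs above follow (the size and names below are placeholders, not the driver's):

#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int example_ring(struct pci_dev *pdev)
{
	size_t size = 128 * sizeof(u32);	/* stand-in for a descriptor ring */
	dma_addr_t phys;
	void *base;

	base = dma_zalloc_coherent(&pdev->dev, size, &phys, GFP_KERNEL);
	if (!base)
		return -ENOMEM;

	/* program 'phys' into the adapter; the CPU uses 'base' directly */

	dma_free_coherent(&pdev->dev, size, base, phys);
	return 0;
}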