RDMA/erdma: Refactor the initialization and destruction of EQ
We extracted the common parts of the EQ initialization/destruction process into shared helpers (erdma_eq_common_init() / erdma_eq_destroy()) to make the code cleaner.

Signed-off-by: Cheng Xu <chengyou@linux.alibaba.com>
Link: https://patch.msgid.link/20240902112920.58749-2-chengyou@linux.alibaba.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Commit b24506f1c3 (parent: 34efda1735), authored by Cheng Xu and committed by Leon Romanovsky.
@@ -274,7 +274,8 @@ void notify_eq(struct erdma_eq *eq);
|
||||
void *get_next_valid_eqe(struct erdma_eq *eq);
|
||||
|
||||
int erdma_aeq_init(struct erdma_dev *dev);
|
||||
void erdma_aeq_destroy(struct erdma_dev *dev);
|
||||
int erdma_eq_common_init(struct erdma_dev *dev, struct erdma_eq *eq, u32 depth);
|
||||
void erdma_eq_destroy(struct erdma_dev *dev, struct erdma_eq *eq);
|
||||
|
||||
void erdma_aeq_event_handler(struct erdma_dev *dev);
|
||||
void erdma_ceq_completion_handler(struct erdma_eq_cb *ceq_cb);
|
||||
|
||||
@@ -158,20 +158,13 @@ static int erdma_cmdq_eq_init(struct erdma_dev *dev)
|
||||
{
|
||||
struct erdma_cmdq *cmdq = &dev->cmdq;
|
||||
struct erdma_eq *eq = &cmdq->eq;
|
||||
int ret;
|
||||
|
||||
eq->depth = cmdq->max_outstandings;
|
||||
eq->qbuf = dma_alloc_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT,
|
||||
&eq->qbuf_dma_addr, GFP_KERNEL);
|
||||
if (!eq->qbuf)
|
||||
return -ENOMEM;
|
||||
|
||||
spin_lock_init(&eq->lock);
|
||||
atomic64_set(&eq->event_num, 0);
|
||||
ret = erdma_eq_common_init(dev, eq, cmdq->max_outstandings);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
eq->db = dev->func_bar + ERDMA_REGS_CEQ_DB_BASE_REG;
|
||||
eq->dbrec = dma_pool_zalloc(dev->db_pool, GFP_KERNEL, &eq->dbrec_dma);
|
||||
if (!eq->dbrec)
|
||||
goto err_out;
|
||||
|
||||
erdma_reg_write32(dev, ERDMA_REGS_CMDQ_EQ_ADDR_H_REG,
|
||||
upper_32_bits(eq->qbuf_dma_addr));
|
||||
@@ -181,12 +174,6 @@ static int erdma_cmdq_eq_init(struct erdma_dev *dev)
|
||||
erdma_reg_write64(dev, ERDMA_CMDQ_EQ_DB_HOST_ADDR_REG, eq->dbrec_dma);
|
||||
|
||||
return 0;
|
||||
|
||||
err_out:
|
||||
dma_free_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT, eq->qbuf,
|
||||
eq->qbuf_dma_addr);
|
||||
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
int erdma_cmdq_init(struct erdma_dev *dev)
|
||||
@@ -247,10 +234,7 @@ void erdma_cmdq_destroy(struct erdma_dev *dev)
|
||||
|
||||
clear_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state);
|
||||
|
||||
dma_free_coherent(&dev->pdev->dev, cmdq->eq.depth << EQE_SHIFT,
|
||||
cmdq->eq.qbuf, cmdq->eq.qbuf_dma_addr);
|
||||
|
||||
dma_pool_free(dev->db_pool, cmdq->eq.dbrec, cmdq->eq.dbrec_dma);
|
||||
erdma_eq_destroy(dev, &cmdq->eq);
|
||||
|
||||
dma_free_coherent(&dev->pdev->dev, cmdq->sq.depth << SQEBB_SHIFT,
|
||||
cmdq->sq.qbuf, cmdq->sq.qbuf_dma_addr);
|
||||
|
||||
@@ -80,25 +80,51 @@ void erdma_aeq_event_handler(struct erdma_dev *dev)
|
||||
notify_eq(&dev->aeq);
|
||||
}
|
||||
|
||||
int erdma_aeq_init(struct erdma_dev *dev)
|
||||
/*
 * erdma_eq_common_init() - common setup shared by all erdma event queues
 * (the AEQ, the CMDQ completion EQ, and each CEQ).
 *
 * Allocates the EQ ring buffer (@depth entries of EQE size) as DMA-coherent
 * memory and a doorbell record from the device's dma_pool, then resets the
 * queue bookkeeping: spinlock, event/notify counters, consumer index, depth.
 *
 * Return: 0 on success, -ENOMEM if either allocation fails. On failure no
 * resources remain allocated (the qbuf is freed before returning).
 */
int erdma_eq_common_init(struct erdma_dev *dev, struct erdma_eq *eq, u32 depth)
{
	u32 buf_size = depth << EQE_SHIFT;

	eq->qbuf = dma_alloc_coherent(&dev->pdev->dev, buf_size,
				      &eq->qbuf_dma_addr, GFP_KERNEL);
	if (!eq->qbuf)
		return -ENOMEM;

	eq->dbrec = dma_pool_zalloc(dev->db_pool, GFP_KERNEL, &eq->dbrec_dma);
	if (!eq->dbrec)
		goto err_free_qbuf;

	spin_lock_init(&eq->lock);
	atomic64_set(&eq->event_num, 0);
	atomic64_set(&eq->notify_num, 0);
	eq->ci = 0;
	eq->depth = depth;

	return 0;

err_free_qbuf:
	dma_free_coherent(&dev->pdev->dev, buf_size, eq->qbuf,
			  eq->qbuf_dma_addr);

	return -ENOMEM;
}
|
||||
|
||||
void erdma_eq_destroy(struct erdma_dev *dev, struct erdma_eq *eq)
|
||||
{
|
||||
dma_pool_free(dev->db_pool, eq->dbrec, eq->dbrec_dma);
|
||||
dma_free_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT, eq->qbuf,
|
||||
eq->qbuf_dma_addr);
|
||||
}
|
||||
|
||||
int erdma_aeq_init(struct erdma_dev *dev)
|
||||
{
|
||||
struct erdma_eq *eq = &dev->aeq;
|
||||
int ret;
|
||||
|
||||
ret = erdma_eq_common_init(dev, &dev->aeq, ERDMA_DEFAULT_EQ_DEPTH);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
eq->db = dev->func_bar + ERDMA_REGS_AEQ_DB_REG;
|
||||
eq->dbrec = dma_pool_zalloc(dev->db_pool, GFP_KERNEL, &eq->dbrec_dma);
|
||||
if (!eq->dbrec)
|
||||
goto err_out;
|
||||
|
||||
erdma_reg_write32(dev, ERDMA_REGS_AEQ_ADDR_H_REG,
|
||||
upper_32_bits(eq->qbuf_dma_addr));
|
||||
@@ -108,22 +134,6 @@ int erdma_aeq_init(struct erdma_dev *dev)
|
||||
erdma_reg_write64(dev, ERDMA_AEQ_DB_HOST_ADDR_REG, eq->dbrec_dma);
|
||||
|
||||
return 0;
|
||||
|
||||
err_out:
|
||||
dma_free_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT, eq->qbuf,
|
||||
eq->qbuf_dma_addr);
|
||||
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
void erdma_aeq_destroy(struct erdma_dev *dev)
|
||||
{
|
||||
struct erdma_eq *eq = &dev->aeq;
|
||||
|
||||
dma_free_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT, eq->qbuf,
|
||||
eq->qbuf_dma_addr);
|
||||
|
||||
dma_pool_free(dev->db_pool, eq->dbrec, eq->dbrec_dma);
|
||||
}
|
||||
|
||||
void erdma_ceq_completion_handler(struct erdma_eq_cb *ceq_cb)
|
||||
@@ -234,32 +244,21 @@ static int erdma_ceq_init_one(struct erdma_dev *dev, u16 ceqn)
|
||||
struct erdma_eq *eq = &dev->ceqs[ceqn].eq;
|
||||
int ret;
|
||||
|
||||
eq->depth = ERDMA_DEFAULT_EQ_DEPTH;
|
||||
eq->qbuf = dma_alloc_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT,
|
||||
&eq->qbuf_dma_addr, GFP_KERNEL);
|
||||
if (!eq->qbuf)
|
||||
return -ENOMEM;
|
||||
|
||||
spin_lock_init(&eq->lock);
|
||||
atomic64_set(&eq->event_num, 0);
|
||||
atomic64_set(&eq->notify_num, 0);
|
||||
ret = erdma_eq_common_init(dev, eq, ERDMA_DEFAULT_EQ_DEPTH);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
eq->db = dev->func_bar + ERDMA_REGS_CEQ_DB_BASE_REG +
|
||||
(ceqn + 1) * ERDMA_DB_SIZE;
|
||||
|
||||
eq->dbrec = dma_pool_zalloc(dev->db_pool, GFP_KERNEL, &eq->dbrec_dma);
|
||||
if (!eq->dbrec) {
|
||||
dma_free_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT,
|
||||
eq->qbuf, eq->qbuf_dma_addr);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
eq->ci = 0;
|
||||
dev->ceqs[ceqn].dev = dev;
|
||||
dev->ceqs[ceqn].ready = true;
|
||||
|
||||
/* CEQ indexed from 1, 0 rsvd for CMDQ-EQ. */
|
||||
ret = create_eq_cmd(dev, ceqn + 1, eq);
|
||||
dev->ceqs[ceqn].ready = ret ? false : true;
|
||||
if (ret) {
|
||||
erdma_eq_destroy(dev, eq);
|
||||
dev->ceqs[ceqn].ready = false;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
@@ -283,9 +282,7 @@ static void erdma_ceq_uninit_one(struct erdma_dev *dev, u16 ceqn)
|
||||
if (err)
|
||||
return;
|
||||
|
||||
dma_free_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT, eq->qbuf,
|
||||
eq->qbuf_dma_addr);
|
||||
dma_pool_free(dev->db_pool, eq->dbrec, eq->dbrec_dma);
|
||||
erdma_eq_destroy(dev, eq);
|
||||
}
|
||||
|
||||
int erdma_ceqs_init(struct erdma_dev *dev)
|
||||
|
||||
@@ -333,7 +333,7 @@ err_uninit_cmdq:
|
||||
erdma_cmdq_destroy(dev);
|
||||
|
||||
err_uninit_aeq:
|
||||
erdma_aeq_destroy(dev);
|
||||
erdma_eq_destroy(dev, &dev->aeq);
|
||||
|
||||
err_uninit_comm_irq:
|
||||
erdma_comm_irq_uninit(dev);
|
||||
@@ -366,7 +366,7 @@ static void erdma_remove_dev(struct pci_dev *pdev)
|
||||
erdma_ceqs_uninit(dev);
|
||||
erdma_hw_reset(dev);
|
||||
erdma_cmdq_destroy(dev);
|
||||
erdma_aeq_destroy(dev);
|
||||
erdma_eq_destroy(dev, &dev->aeq);
|
||||
erdma_comm_irq_uninit(dev);
|
||||
pci_free_irq_vectors(dev->pdev);
|
||||
erdma_device_uninit(dev);
|
||||
|
||||
Reference in New Issue
Block a user