net: txgbevf: init interrupts and request irqs
Add IRQ allocation flow functions for the VF. Allocate PCIe MSI-X IRQs for the driver and call request_irq for Tx/Rx rings and miscellaneous other events. If the allocation is successful, configure vectors for interrupts. Enable the interrupt mask in wxvf_irq_enable. Signed-off-by: Mengyuan Lou <mengyuanlou@net-swift.com> Link: https://patch.msgid.link/20250704094923.652-7-mengyuanlou@net-swift.com Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
committed by
Jakub Kicinski
parent
4ee8afb44a
commit
fd0a2e03bf
@@ -11,6 +11,7 @@
|
||||
#include "wx_type.h"
|
||||
#include "wx_lib.h"
|
||||
#include "wx_sriov.h"
|
||||
#include "wx_vf.h"
|
||||
#include "wx_hw.h"
|
||||
|
||||
static int wx_phy_read_reg_mdi(struct mii_bus *bus, int phy_addr, int devnum, int regnum)
|
||||
@@ -124,6 +125,11 @@ void wx_intr_enable(struct wx *wx, u64 qmask)
|
||||
{
|
||||
u32 mask;
|
||||
|
||||
if (wx->pdev->is_virtfn) {
|
||||
wr32(wx, WX_VXIMC, qmask);
|
||||
return;
|
||||
}
|
||||
|
||||
mask = (qmask & U32_MAX);
|
||||
if (mask)
|
||||
wr32(wx, WX_PX_IMC(0), mask);
|
||||
|
||||
@@ -1819,7 +1819,7 @@ static int wx_set_interrupt_capability(struct wx *wx)
|
||||
|
||||
/* We will try to get MSI-X interrupts first */
|
||||
ret = wx_acquire_msix_vectors(wx);
|
||||
if (ret == 0 || (ret == -ENOMEM))
|
||||
if (ret == 0 || (ret == -ENOMEM) || pdev->is_virtfn)
|
||||
return ret;
|
||||
|
||||
/* Disable VMDq support */
|
||||
@@ -2170,7 +2170,12 @@ int wx_init_interrupt_scheme(struct wx *wx)
|
||||
int ret;
|
||||
|
||||
/* Number of supported queues */
|
||||
wx_set_num_queues(wx);
|
||||
if (wx->pdev->is_virtfn) {
|
||||
if (wx->set_num_queues)
|
||||
wx->set_num_queues(wx);
|
||||
} else {
|
||||
wx_set_num_queues(wx);
|
||||
}
|
||||
|
||||
/* Set interrupt mode */
|
||||
ret = wx_set_interrupt_capability(wx);
|
||||
|
||||
@@ -1324,6 +1324,7 @@ struct wx {
|
||||
int (*setup_tc)(struct net_device *netdev, u8 tc);
|
||||
void (*do_reset)(struct net_device *netdev);
|
||||
int (*ptp_setup_sdp)(struct wx *wx);
|
||||
void (*set_num_queues)(struct wx *wx);
|
||||
|
||||
bool pps_enabled;
|
||||
u64 pps_width;
|
||||
|
||||
@@ -17,6 +17,7 @@ int wxvf_suspend(struct device *dev_d)
|
||||
struct wx *wx = pci_get_drvdata(pdev);
|
||||
|
||||
netif_device_detach(wx->netdev);
|
||||
wx_clear_interrupt_scheme(wx);
|
||||
pci_disable_device(pdev);
|
||||
|
||||
return 0;
|
||||
@@ -35,6 +36,7 @@ int wxvf_resume(struct device *dev_d)
|
||||
struct wx *wx = pci_get_drvdata(pdev);
|
||||
|
||||
pci_set_master(pdev);
|
||||
wx_init_interrupt_scheme(wx);
|
||||
netif_device_attach(wx->netdev);
|
||||
|
||||
return 0;
|
||||
@@ -51,6 +53,7 @@ void wxvf_remove(struct pci_dev *pdev)
|
||||
kfree(wx->vfinfo);
|
||||
kfree(wx->rss_key);
|
||||
kfree(wx->mac_table);
|
||||
wx_clear_interrupt_scheme(wx);
|
||||
pci_release_selected_regions(pdev,
|
||||
pci_select_bars(pdev, IORESOURCE_MEM));
|
||||
pci_disable_device(pdev);
|
||||
@@ -93,9 +96,8 @@ int wx_request_msix_irqs_vf(struct wx *wx)
|
||||
}
|
||||
}
|
||||
|
||||
err = request_threaded_irq(wx->msix_entry->vector, NULL,
|
||||
wx_msix_misc_vf, IRQF_ONESHOT,
|
||||
netdev->name, wx);
|
||||
err = request_threaded_irq(wx->msix_entry->vector, wx_msix_misc_vf,
|
||||
NULL, IRQF_ONESHOT, netdev->name, wx);
|
||||
if (err) {
|
||||
wx_err(wx, "request_irq for msix_other failed: %d\n", err);
|
||||
goto free_queue_irqs;
|
||||
@@ -241,9 +243,35 @@ int wx_set_mac_vf(struct net_device *netdev, void *p)
|
||||
}
|
||||
EXPORT_SYMBOL(wx_set_mac_vf);
|
||||
|
||||
static void wxvf_irq_enable(struct wx *wx)
|
||||
{
|
||||
wr32(wx, WX_VXIMC, wx->eims_enable_mask);
|
||||
}
|
||||
|
||||
static void wxvf_up_complete(struct wx *wx)
|
||||
{
|
||||
wx_configure_msix_vf(wx);
|
||||
|
||||
/* clear any pending interrupts, may auto mask */
|
||||
wr32(wx, WX_VXICR, U32_MAX);
|
||||
wxvf_irq_enable(wx);
|
||||
}
|
||||
|
||||
int wxvf_open(struct net_device *netdev)
|
||||
{
|
||||
struct wx *wx = netdev_priv(netdev);
|
||||
int err;
|
||||
|
||||
err = wx_request_msix_irqs_vf(wx);
|
||||
if (err)
|
||||
goto err_reset;
|
||||
|
||||
wxvf_up_complete(wx);
|
||||
|
||||
return 0;
|
||||
err_reset:
|
||||
wx_reset_vf(wx);
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL(wxvf_open);
|
||||
|
||||
@@ -251,8 +279,13 @@ static void wxvf_down(struct wx *wx)
|
||||
{
|
||||
struct net_device *netdev = wx->netdev;
|
||||
|
||||
netif_tx_stop_all_queues(netdev);
|
||||
netif_tx_disable(netdev);
|
||||
wx_napi_disable_all(wx);
|
||||
wx_reset_vf(wx);
|
||||
|
||||
wx_clean_all_tx_rings(wx);
|
||||
wx_clean_all_rx_rings(wx);
|
||||
}
|
||||
|
||||
int wxvf_close(struct net_device *netdev)
|
||||
@@ -260,6 +293,7 @@ int wxvf_close(struct net_device *netdev)
|
||||
struct wx *wx = netdev_priv(netdev);
|
||||
|
||||
wxvf_down(wx);
|
||||
wx_free_irq(wx);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -10,6 +10,7 @@
|
||||
|
||||
#include "../libwx/wx_type.h"
|
||||
#include "../libwx/wx_hw.h"
|
||||
#include "../libwx/wx_lib.h"
|
||||
#include "../libwx/wx_mbx.h"
|
||||
#include "../libwx/wx_vf.h"
|
||||
#include "../libwx/wx_vf_common.h"
|
||||
@@ -43,6 +44,39 @@ static const struct net_device_ops txgbevf_netdev_ops = {
|
||||
.ndo_set_mac_address = wx_set_mac_vf,
|
||||
};
|
||||
|
||||
static void txgbevf_set_num_queues(struct wx *wx)
|
||||
{
|
||||
u32 def_q = 0, num_tcs = 0;
|
||||
u16 rss, queue;
|
||||
int ret = 0;
|
||||
|
||||
/* Start with base case */
|
||||
wx->num_rx_queues = 1;
|
||||
wx->num_tx_queues = 1;
|
||||
|
||||
spin_lock_bh(&wx->mbx.mbx_lock);
|
||||
/* fetch queue configuration from the PF */
|
||||
ret = wx_get_queues_vf(wx, &num_tcs, &def_q);
|
||||
spin_unlock_bh(&wx->mbx.mbx_lock);
|
||||
|
||||
if (ret)
|
||||
return;
|
||||
|
||||
/* we need as many queues as traffic classes */
|
||||
if (num_tcs > 1) {
|
||||
wx->num_rx_queues = num_tcs;
|
||||
} else {
|
||||
rss = min_t(u16, num_online_cpus(), TXGBEVF_MAX_RSS_NUM);
|
||||
queue = min_t(u16, wx->mac.max_rx_queues, wx->mac.max_tx_queues);
|
||||
rss = min_t(u16, queue, rss);
|
||||
|
||||
if (wx->vfinfo->vf_api >= wx_mbox_api_13) {
|
||||
wx->num_rx_queues = rss;
|
||||
wx->num_tx_queues = rss;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void txgbevf_init_type_code(struct wx *wx)
|
||||
{
|
||||
switch (wx->device_id) {
|
||||
@@ -80,6 +114,8 @@ static int txgbevf_sw_init(struct wx *wx)
|
||||
if (err)
|
||||
goto err_init_mbx_params;
|
||||
|
||||
/* max q_vectors */
|
||||
wx->mac.max_msix_vectors = TXGBEVF_MAX_MSIX_VECTORS;
|
||||
/* Initialize the device type */
|
||||
txgbevf_init_type_code(wx);
|
||||
/* lock to protect mailbox accesses */
|
||||
@@ -116,6 +152,8 @@ static int txgbevf_sw_init(struct wx *wx)
|
||||
wx->tx_work_limit = TXGBEVF_DEFAULT_TX_WORK;
|
||||
wx->rx_work_limit = TXGBEVF_DEFAULT_RX_WORK;
|
||||
|
||||
wx->set_num_queues = txgbevf_set_num_queues;
|
||||
|
||||
return 0;
|
||||
err_reset_hw:
|
||||
kfree(wx->vfinfo);
|
||||
@@ -211,6 +249,10 @@ static int txgbevf_probe(struct pci_dev *pdev,
|
||||
eth_hw_addr_set(netdev, wx->mac.perm_addr);
|
||||
ether_addr_copy(netdev->perm_addr, wx->mac.addr);
|
||||
|
||||
err = wx_init_interrupt_scheme(wx);
|
||||
if (err)
|
||||
goto err_free_sw_init;
|
||||
|
||||
err = register_netdev(netdev);
|
||||
if (err)
|
||||
goto err_register;
|
||||
@@ -220,6 +262,8 @@ static int txgbevf_probe(struct pci_dev *pdev,
|
||||
return 0;
|
||||
|
||||
err_register:
|
||||
wx_clear_interrupt_scheme(wx);
|
||||
err_free_sw_init:
|
||||
kfree(wx->vfinfo);
|
||||
kfree(wx->rss_key);
|
||||
kfree(wx->mac_table);
|
||||
|
||||
@@ -14,6 +14,8 @@
|
||||
#define TXGBEVF_DEV_ID_AML503F 0x503f
|
||||
#define TXGBEVF_DEV_ID_AML513F 0x513f
|
||||
|
||||
#define TXGBEVF_MAX_MSIX_VECTORS 2
|
||||
#define TXGBEVF_MAX_RSS_NUM 4
|
||||
#define TXGBEVF_MAX_RX_QUEUES 4
|
||||
#define TXGBEVF_MAX_TX_QUEUES 4
|
||||
#define TXGBEVF_DEFAULT_TXD 128
|
||||
|
||||
Reference in New Issue
Block a user