Merge branch '1GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue

Tony Nguyen says:

====================
Intel Wired LAN Driver Updates 2025-04-29 (igb, igc, ixgbe, idpf)

For igb:
Kurt Kanzenbach adds linking of IRQs and queues to NAPI instances and
adds persistent NAPI config. Lastly, he removes undesired IRQs that
occur while busy polling.

For igc:
Kurt Kanzenbach switches the Tx mode for MQPRIO offload to harmonize the
current implementation with TAPRIO.

For ixgbe:
Jedrzej adds separate ethtool ops for E610 devices to account for device
differences.

Slawomir adds devlink region support for E610 devices.

For idpf:
Mateusz assigns and utilizes the ptype field out of libeth_rqe_info.

Michal removes unreachable code.

* '1GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue:
  idpf: remove unreachable code from setting mailbox
  idpf: assign extracted ptype to struct libeth_rqe_info field
  ixgbe: devlink: add devlink region support for E610
  ixgbe: add E610 .set_phys_id() callback implementation
  ixgbe: apply different rules for setting FC on E610
  ixgbe: add support for ACPI WOL for E610
  ixgbe: create E610 specific ethtool_ops structure
  igc: Change Tx mode for MQPRIO offloading
  igc: Limit netdev_tc calls to MQPRIO
  igb: Get rid of spurious interrupts
  igb: Add support for persistent NAPI config
  igb: Link queues to NAPI instances
  igb: Link IRQs to NAPI instances
====================

Link: https://patch.msgid.link/20250429234651.3982025-1-anthony.l.nguyen@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
Jakub Kicinski
2025-05-01 17:51:31 -07:00
18 changed files with 667 additions and 97 deletions
@@ -120,3 +120,52 @@ EMP firmware image.
The driver does not currently support reloading the driver via
``DEVLINK_RELOAD_ACTION_DRIVER_REINIT``.
Regions
=======
The ``ixgbe`` driver implements the following regions for accessing internal
device data.
.. list-table:: regions implemented
:widths: 15 85
* - Name
- Description
* - ``nvm-flash``
- The contents of the entire flash chip, sometimes referred to as
the device's Non Volatile Memory.
* - ``shadow-ram``
- The contents of the Shadow RAM, which is loaded from the beginning
of the flash. Although the contents are primarily from the flash,
this area also contains data generated during device boot which is
not stored in flash.
* - ``device-caps``
- The contents of the device firmware's capabilities buffer. Useful to
determine the current state and configuration of the device.
Both the ``nvm-flash`` and ``shadow-ram`` regions can be accessed without a
snapshot. The ``device-caps`` region requires a snapshot as the contents are
sent by firmware and can't be split into separate reads.
Users can request an immediate capture of a snapshot for all three regions
via the ``DEVLINK_CMD_REGION_NEW`` command.
.. code:: shell
$ devlink region show
pci/0000:01:00.0/nvm-flash: size 10485760 snapshot [] max 1
pci/0000:01:00.0/device-caps: size 4096 snapshot [] max 10
$ devlink region new pci/0000:01:00.0/nvm-flash snapshot 1
$ devlink region dump pci/0000:01:00.0/nvm-flash snapshot 1
0000000000000000 0014 95dc 0014 9514 0035 1670 0034 db30
0000000000000010 0000 0000 ffff ff04 0029 8c00 0028 8cc8
0000000000000020 0016 0bb8 0016 1720 0000 0000 c00f 3ffc
0000000000000030 bada cce5 bada cce5 bada cce5 bada cce5
$ devlink region read pci/0000:01:00.0/nvm-flash snapshot 1 address 0 length 16
0000000000000000 0014 95dc 0014 9514 0035 1670 0034 db30
$ devlink region delete pci/0000:01:00.0/device-caps snapshot 1
+1 -17
View File
@@ -143,22 +143,6 @@ static int idpf_mb_intr_req_irq(struct idpf_adapter *adapter)
return 0;
}
/**
* idpf_set_mb_vec_id - Set vector index for mailbox
* @adapter: adapter structure to access the vector chunks
*
* The first vector id in the requested vector chunks from the CP is for
* the mailbox
*/
static void idpf_set_mb_vec_id(struct idpf_adapter *adapter)
{
if (adapter->req_vec_chunks)
adapter->mb_vector.v_idx =
le16_to_cpu(adapter->caps.mailbox_vector_id);
else
adapter->mb_vector.v_idx = 0;
}
/**
* idpf_mb_intr_init - Initialize the mailbox interrupt
* @adapter: adapter structure to store the mailbox vector
@@ -349,7 +333,7 @@ int idpf_intr_req(struct idpf_adapter *adapter)
goto free_irq;
}
idpf_set_mb_vec_id(adapter);
adapter->mb_vector.v_idx = le16_to_cpu(adapter->caps.mailbox_vector_id);
vecids = kcalloc(total_vecs, sizeof(u16), GFP_KERNEL);
if (!vecids) {
@@ -891,7 +891,6 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rx_q,
* idpf_rx_singleq_extract_base_fields - Extract fields from the Rx descriptor
* @rx_desc: the descriptor to process
* @fields: storage for extracted values
* @ptype: pointer that will store packet type
*
* Decode the Rx descriptor and extract relevant information including the
* size and Rx packet type.
@@ -901,21 +900,20 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rx_q,
*/
static void
idpf_rx_singleq_extract_base_fields(const union virtchnl2_rx_desc *rx_desc,
struct libeth_rqe_info *fields, u32 *ptype)
struct libeth_rqe_info *fields)
{
u64 qword;
qword = le64_to_cpu(rx_desc->base_wb.qword1.status_error_ptype_len);
fields->len = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_QW1_LEN_PBUF_M, qword);
*ptype = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_QW1_PTYPE_M, qword);
fields->ptype = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_QW1_PTYPE_M, qword);
}
/**
* idpf_rx_singleq_extract_flex_fields - Extract fields from the Rx descriptor
* @rx_desc: the descriptor to process
* @fields: storage for extracted values
* @ptype: pointer that will store packet type
*
* Decode the Rx descriptor and extract relevant information including the
* size and Rx packet type.
@@ -925,12 +923,12 @@ idpf_rx_singleq_extract_base_fields(const union virtchnl2_rx_desc *rx_desc,
*/
static void
idpf_rx_singleq_extract_flex_fields(const union virtchnl2_rx_desc *rx_desc,
struct libeth_rqe_info *fields, u32 *ptype)
struct libeth_rqe_info *fields)
{
fields->len = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_PKT_LEN_M,
le16_to_cpu(rx_desc->flex_nic_wb.pkt_len));
*ptype = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_PTYPE_M,
le16_to_cpu(rx_desc->flex_nic_wb.ptype_flex_flags0));
fields->ptype = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_PTYPE_M,
le16_to_cpu(rx_desc->flex_nic_wb.ptype_flex_flags0));
}
/**
@@ -938,18 +936,17 @@ idpf_rx_singleq_extract_flex_fields(const union virtchnl2_rx_desc *rx_desc,
* @rx_q: Rx descriptor queue
* @rx_desc: the descriptor to process
* @fields: storage for extracted values
* @ptype: pointer that will store packet type
*
*/
static void
idpf_rx_singleq_extract_fields(const struct idpf_rx_queue *rx_q,
const union virtchnl2_rx_desc *rx_desc,
struct libeth_rqe_info *fields, u32 *ptype)
struct libeth_rqe_info *fields)
{
if (rx_q->rxdids == VIRTCHNL2_RXDID_1_32B_BASE_M)
idpf_rx_singleq_extract_base_fields(rx_desc, fields, ptype);
idpf_rx_singleq_extract_base_fields(rx_desc, fields);
else
idpf_rx_singleq_extract_flex_fields(rx_desc, fields, ptype);
idpf_rx_singleq_extract_flex_fields(rx_desc, fields);
}
/**
@@ -972,7 +969,6 @@ static int idpf_rx_singleq_clean(struct idpf_rx_queue *rx_q, int budget)
struct libeth_rqe_info fields = { };
union virtchnl2_rx_desc *rx_desc;
struct idpf_rx_buf *rx_buf;
u32 ptype;
/* get the Rx desc from Rx queue based on 'next_to_clean' */
rx_desc = &rx_q->rx[ntc];
@@ -993,7 +989,7 @@ static int idpf_rx_singleq_clean(struct idpf_rx_queue *rx_q, int budget)
*/
dma_rmb();
idpf_rx_singleq_extract_fields(rx_q, rx_desc, &fields, &ptype);
idpf_rx_singleq_extract_fields(rx_q, rx_desc, &fields);
rx_buf = &rx_q->rx_buf[ntc];
if (!libeth_rx_sync_for_cpu(rx_buf, fields.len))
@@ -1037,7 +1033,8 @@ skip_data:
total_rx_bytes += skb->len;
/* protocol */
idpf_rx_singleq_process_skb_fields(rx_q, skb, rx_desc, ptype);
idpf_rx_singleq_process_skb_fields(rx_q, skb, rx_desc,
fields.ptype);
/* send completed skb up the stack */
napi_gro_receive(rx_q->pp->p.napi, skb);
+4 -1
View File
@@ -391,7 +391,8 @@ enum e1000_ring_flags_t {
IGB_RING_FLAG_RX_LB_VLAN_BSWAP,
IGB_RING_FLAG_TX_CTX_IDX,
IGB_RING_FLAG_TX_DETECT_HANG,
IGB_RING_FLAG_TX_DISABLED
IGB_RING_FLAG_TX_DISABLED,
IGB_RING_FLAG_RX_ALLOC_FAILED,
};
#define ring_uses_large_buffer(ring) \
@@ -722,6 +723,8 @@ enum igb_boards {
extern char igb_driver_name[];
void igb_set_queue_napi(struct igb_adapter *adapter, int q_idx,
struct napi_struct *napi);
int igb_xmit_xdp_ring(struct igb_adapter *adapter,
struct igb_ring *ring,
struct xdp_frame *xdpf);
+68 -10
View File
@@ -947,6 +947,9 @@ static int igb_request_msix(struct igb_adapter *adapter)
q_vector);
if (err)
goto err_free;
netif_napi_set_irq(&q_vector->napi,
adapter->msix_entries[vector].vector);
}
igb_configure_msix(adapter);
@@ -1194,7 +1197,8 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
return -ENOMEM;
/* initialize NAPI */
netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll);
netif_napi_add_config(adapter->netdev, &q_vector->napi, igb_poll,
v_idx);
/* tie q_vector and adapter together */
adapter->q_vector[v_idx] = q_vector;
@@ -2096,6 +2100,22 @@ static void igb_check_swap_media(struct igb_adapter *adapter)
wr32(E1000_CTRL_EXT, ctrl_ext);
}
/**
 * igb_set_queue_napi - (un)link a q_vector's queues to a NAPI instance
 * @adapter: board private structure
 * @vector: index of the q_vector whose queues are updated
 * @napi: NAPI instance to link, or NULL to clear the mapping
 *
 * Registers the vector's Rx and/or Tx queue (whichever it owns) with the
 * given NAPI instance via netif_queue_set_napi() so userspace can discover
 * the queue<->NAPI association.
 */
void igb_set_queue_napi(struct igb_adapter *adapter, int vector,
			struct napi_struct *napi)
{
	struct igb_q_vector *q_vector = adapter->q_vector[vector];

	if (q_vector->rx.ring)
		netif_queue_set_napi(adapter->netdev,
				     q_vector->rx.ring->queue_index,
				     NETDEV_QUEUE_TYPE_RX, napi);

	if (q_vector->tx.ring)
		netif_queue_set_napi(adapter->netdev,
				     q_vector->tx.ring->queue_index,
				     NETDEV_QUEUE_TYPE_TX, napi);
}
/**
* igb_up - Open the interface and prepare it to handle traffic
* @adapter: board private structure
@@ -2103,6 +2123,7 @@ static void igb_check_swap_media(struct igb_adapter *adapter)
int igb_up(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
struct napi_struct *napi;
int i;
/* hardware has been reset, we need to reload some things */
@@ -2110,8 +2131,11 @@ int igb_up(struct igb_adapter *adapter)
clear_bit(__IGB_DOWN, &adapter->state);
for (i = 0; i < adapter->num_q_vectors; i++)
napi_enable(&(adapter->q_vector[i]->napi));
for (i = 0; i < adapter->num_q_vectors; i++) {
napi = &adapter->q_vector[i]->napi;
napi_enable(napi);
igb_set_queue_napi(adapter, i, napi);
}
if (adapter->flags & IGB_FLAG_HAS_MSIX)
igb_configure_msix(adapter);
@@ -2181,6 +2205,7 @@ void igb_down(struct igb_adapter *adapter)
for (i = 0; i < adapter->num_q_vectors; i++) {
if (adapter->q_vector[i]) {
napi_synchronize(&adapter->q_vector[i]->napi);
igb_set_queue_napi(adapter, i, NULL);
napi_disable(&adapter->q_vector[i]->napi);
}
}
@@ -4113,8 +4138,9 @@ static int igb_sw_init(struct igb_adapter *adapter)
static int __igb_open(struct net_device *netdev, bool resuming)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
struct e1000_hw *hw = &adapter->hw;
struct napi_struct *napi;
int err;
int i;
@@ -4166,8 +4192,11 @@ static int __igb_open(struct net_device *netdev, bool resuming)
/* From here on the code is the same as igb_up() */
clear_bit(__IGB_DOWN, &adapter->state);
for (i = 0; i < adapter->num_q_vectors; i++)
napi_enable(&(adapter->q_vector[i]->napi));
for (i = 0; i < adapter->num_q_vectors; i++) {
napi = &adapter->q_vector[i]->napi;
napi_enable(napi);
igb_set_queue_napi(adapter, i, napi);
}
/* Clear any pending interrupts. */
rd32(E1000_TSICR);
@@ -5726,11 +5755,29 @@ no_wait:
if (adapter->flags & IGB_FLAG_HAS_MSIX) {
u32 eics = 0;
for (i = 0; i < adapter->num_q_vectors; i++)
eics |= adapter->q_vector[i]->eims_value;
wr32(E1000_EICS, eics);
for (i = 0; i < adapter->num_q_vectors; i++) {
struct igb_q_vector *q_vector = adapter->q_vector[i];
struct igb_ring *rx_ring;
if (!q_vector->rx.ring)
continue;
rx_ring = adapter->rx_ring[q_vector->rx.ring->queue_index];
if (test_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags)) {
eics |= q_vector->eims_value;
clear_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
}
}
if (eics)
wr32(E1000_EICS, eics);
} else {
wr32(E1000_ICS, E1000_ICS_RXDMT0);
struct igb_ring *rx_ring = adapter->rx_ring[0];
if (test_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags)) {
clear_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
wr32(E1000_ICS, E1000_ICS_RXDMT0);
}
}
igb_spoof_check(adapter);
@@ -9061,6 +9108,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
if (!xdp_res && !skb) {
rx_ring->rx_stats.alloc_failed++;
rx_buffer->pagecnt_bias++;
set_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
break;
}
@@ -9120,6 +9168,7 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
page = dev_alloc_pages(igb_rx_pg_order(rx_ring));
if (unlikely(!page)) {
rx_ring->rx_stats.alloc_failed++;
set_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
return false;
}
@@ -9136,6 +9185,7 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
__free_pages(page, igb_rx_pg_order(rx_ring));
rx_ring->rx_stats.alloc_failed++;
set_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
return false;
}
@@ -9674,8 +9724,11 @@ static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
if (state == pci_channel_io_perm_failure)
return PCI_ERS_RESULT_DISCONNECT;
rtnl_lock();
if (netif_running(netdev))
igb_down(adapter);
rtnl_unlock();
pci_disable_device(pdev);
/* Request a slot reset. */
@@ -9734,16 +9787,21 @@ static void igb_io_resume(struct pci_dev *pdev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct igb_adapter *adapter = netdev_priv(netdev);
rtnl_lock();
if (netif_running(netdev)) {
if (!test_bit(__IGB_DOWN, &adapter->state)) {
dev_dbg(&pdev->dev, "Resuming from non-fatal error, do nothing.\n");
rtnl_unlock();
return;
}
if (igb_up(adapter)) {
dev_err(&pdev->dev, "igb_up failed after reset\n");
rtnl_unlock();
return;
}
}
rtnl_unlock();
netif_device_attach(netdev);
+1
View File
@@ -415,6 +415,7 @@ int igb_clean_rx_irq_zc(struct igb_q_vector *q_vector,
/* exit if we failed to retrieve a buffer */
if (!skb) {
rx_ring->rx_stats.alloc_failed++;
set_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
break;
}
+2 -3
View File
@@ -394,12 +394,11 @@ extern char igc_driver_name[];
#define IGC_FLAG_RX_LEGACY BIT(16)
#define IGC_FLAG_TSN_QBV_ENABLED BIT(17)
#define IGC_FLAG_TSN_QAV_ENABLED BIT(18)
#define IGC_FLAG_TSN_LEGACY_ENABLED BIT(19)
#define IGC_FLAG_TSN_PREEMPT_ENABLED BIT(20)
#define IGC_FLAG_TSN_PREEMPT_ENABLED BIT(19)
#define IGC_FLAG_TSN_ANY_ENABLED \
(IGC_FLAG_TSN_QBV_ENABLED | IGC_FLAG_TSN_QAV_ENABLED | \
IGC_FLAG_TSN_LEGACY_ENABLED | IGC_FLAG_TSN_PREEMPT_ENABLED)
IGC_FLAG_TSN_PREEMPT_ENABLED)
#define IGC_FLAG_RSS_FIELD_IPV4_UDP BIT(6)
#define IGC_FLAG_RSS_FIELD_IPV6_UDP BIT(7)
+17 -1
View File
@@ -6730,13 +6730,14 @@ static int igc_tsn_enable_mqprio(struct igc_adapter *adapter,
struct tc_mqprio_qopt_offload *mqprio)
{
struct igc_hw *hw = &adapter->hw;
int i;
int err, i;
if (hw->mac.type != igc_i225)
return -EOPNOTSUPP;
if (!mqprio->qopt.num_tc) {
adapter->strict_priority_enable = false;
netdev_reset_tc(adapter->netdev);
goto apply;
}
@@ -6767,6 +6768,21 @@ static int igc_tsn_enable_mqprio(struct igc_adapter *adapter,
igc_save_mqprio_params(adapter, mqprio->qopt.num_tc,
mqprio->qopt.offset);
err = netdev_set_num_tc(adapter->netdev, adapter->num_tc);
if (err)
return err;
for (i = 0; i < adapter->num_tc; i++) {
err = netdev_set_tc_queue(adapter->netdev, i, 1,
adapter->queue_per_tc[i]);
if (err)
return err;
}
/* In case the card is configured with less than four queues. */
for (; i < IGC_MAX_TX_QUEUES; i++)
adapter->queue_per_tc[i] = i;
mqprio->qopt.hw = TC_MQPRIO_HW_OFFLOAD_TCS;
apply:
+2 -37
View File
@@ -171,18 +171,14 @@ static unsigned int igc_tsn_new_flags(struct igc_adapter *adapter)
{
unsigned int new_flags = adapter->flags & ~IGC_FLAG_TSN_ANY_ENABLED;
if (adapter->taprio_offload_enable)
new_flags |= IGC_FLAG_TSN_QBV_ENABLED;
if (is_any_launchtime(adapter))
if (adapter->taprio_offload_enable || is_any_launchtime(adapter) ||
adapter->strict_priority_enable)
new_flags |= IGC_FLAG_TSN_QBV_ENABLED;
if (is_cbs_enabled(adapter))
new_flags |= IGC_FLAG_TSN_QAV_ENABLED;
if (adapter->strict_priority_enable)
new_flags |= IGC_FLAG_TSN_LEGACY_ENABLED;
if (adapter->fpe.mmsv.pmac_enabled)
new_flags |= IGC_FLAG_TSN_PREEMPT_ENABLED;
@@ -320,16 +316,12 @@ static int igc_tsn_disable_offload(struct igc_adapter *adapter)
wr32(IGC_QBVCYCLET_S, 0);
wr32(IGC_QBVCYCLET, NSEC_PER_SEC);
/* Reset mqprio TC configuration. */
netdev_reset_tc(adapter->netdev);
/* Restore the default Tx arbitration: Priority 0 has the highest
* priority and is assigned to queue 0 and so on and so forth.
*/
igc_tsn_tx_arb(adapter, queue_per_tc);
adapter->flags &= ~IGC_FLAG_TSN_QBV_ENABLED;
adapter->flags &= ~IGC_FLAG_TSN_LEGACY_ENABLED;
return 0;
}
@@ -394,37 +386,10 @@ static int igc_tsn_enable_offload(struct igc_adapter *adapter)
igc_tsn_set_retx_qbvfullthreshold(adapter);
if (adapter->strict_priority_enable) {
int err;
err = netdev_set_num_tc(adapter->netdev, adapter->num_tc);
if (err)
return err;
for (i = 0; i < adapter->num_tc; i++) {
err = netdev_set_tc_queue(adapter->netdev, i, 1,
adapter->queue_per_tc[i]);
if (err)
return err;
}
/* In case the card is configured with less than four queues. */
for (; i < IGC_MAX_TX_QUEUES; i++)
adapter->queue_per_tc[i] = i;
/* Configure queue priorities according to the user provided
* mapping.
*/
igc_tsn_tx_arb(adapter, adapter->queue_per_tc);
/* Enable legacy TSN mode which will do strict priority without
* any other TSN features.
*/
tqavctrl = rd32(IGC_TQAVCTRL);
tqavctrl |= IGC_TQAVCTRL_TRANSMIT_MODE_TSN;
tqavctrl &= ~IGC_TQAVCTRL_ENHANCED_QAV;
wr32(IGC_TQAVCTRL, tqavctrl);
return 0;
}
for (i = 0; i < adapter->num_tx_queues; i++) {
+2 -1
View File
@@ -10,7 +10,8 @@ obj-$(CONFIG_IXGBE) += ixgbe.o
ixgbe-y := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
ixgbe_mbx.o ixgbe_x540.o ixgbe_x550.o ixgbe_lib.o ixgbe_ptp.o \
ixgbe_xsk.o ixgbe_e610.o devlink/devlink.o ixgbe_fw_update.o
ixgbe_xsk.o ixgbe_e610.o devlink/devlink.o ixgbe_fw_update.o \
devlink/region.o
ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
ixgbe_dcb_82599.o ixgbe_dcb_nl.o
@@ -6,5 +6,7 @@
struct ixgbe_adapter *ixgbe_allocate_devlink(struct device *dev);
int ixgbe_devlink_register_port(struct ixgbe_adapter *adapter);
void ixgbe_devlink_init_regions(struct ixgbe_adapter *adapter);
void ixgbe_devlink_destroy_regions(struct ixgbe_adapter *adapter);
#endif /* _IXGBE_DEVLINK_H_ */
@@ -0,0 +1,290 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2025, Intel Corporation. */
#include "ixgbe.h"
#include "devlink.h"
#define IXGBE_DEVLINK_READ_BLK_SIZE (1024 * 1024)
static const struct devlink_region_ops ixgbe_nvm_region_ops;
static const struct devlink_region_ops ixgbe_sram_region_ops;
/**
 * ixgbe_devlink_parse_region - Map region ops to NVM access parameters
 * @hw: pointer to the HW struct
 * @ops: the devlink region being accessed
 * @read_shadow_ram: on exit, true when the Shadow RAM copy must be read
 * @nvm_size: on exit, size in bytes of the selected region
 *
 * Return: 0 on success, -EOPNOTSUPP when @ops is neither the nvm-flash nor
 * the shadow-ram region.
 */
static int ixgbe_devlink_parse_region(struct ixgbe_hw *hw,
				      const struct devlink_region_ops *ops,
				      bool *read_shadow_ram, u32 *nvm_size)
{
	if (ops == &ixgbe_nvm_region_ops) {
		*read_shadow_ram = false;
		*nvm_size = hw->flash.flash_size;
	} else if (ops == &ixgbe_sram_region_ops) {
		*read_shadow_ram = true;
		/* Shadow RAM size is tracked in 16-bit words; convert to bytes */
		*nvm_size = hw->flash.sr_words * 2u;
	} else {
		return -EOPNOTSUPP;
	}

	return 0;
}
/**
 * ixgbe_devlink_nvm_snapshot - Capture a snapshot of the NVM content
 * @devlink: the devlink instance
 * @ops: the devlink region being snapshotted
 * @extack: extended ACK response structure
 * @data: on exit points to snapshot data buffer
 *
 * This function is called in response to the DEVLINK_CMD_REGION_NEW cmd.
 *
 * Capture a snapshot of the whole requested NVM region.
 *
 * No need to worry about freeing @data; the devlink core takes care of it.
 *
 * Return: 0 on success, -EOPNOTSUPP for unsupported regions, -EBUSY when
 * cannot lock NVM, -ENOMEM when cannot alloc mem and -EIO when error
 * occurs during reading.
 */
static int ixgbe_devlink_nvm_snapshot(struct devlink *devlink,
				      const struct devlink_region_ops *ops,
				      struct netlink_ext_ack *extack, u8 **data)
{
	struct ixgbe_adapter *adapter = devlink_priv(devlink);
	struct ixgbe_hw *hw = &adapter->hw;
	bool read_shadow_ram;
	u8 *nvm_data, *buf;
	u32 nvm_size, left;
	u8 num_blks;
	int err;

	err = ixgbe_devlink_parse_region(hw, ops, &read_shadow_ram, &nvm_size);
	if (err)
		return err;

	nvm_data = kvzalloc(nvm_size, GFP_KERNEL);
	if (!nvm_data)
		return -ENOMEM;

	/* Read the region in 1 MB blocks, re-taking the NVM lock per block */
	num_blks = DIV_ROUND_UP(nvm_size, IXGBE_DEVLINK_READ_BLK_SIZE);
	buf = nvm_data;
	left = nvm_size;

	for (int i = 0; i < num_blks; i++) {
		u32 read_sz = min_t(u32, IXGBE_DEVLINK_READ_BLK_SIZE, left);

		/* Need to acquire NVM lock during each loop run because the
		 * total period of reading whole NVM is longer than the maximum
		 * period the lock can be taken defined by the IXGBE_NVM_TIMEOUT.
		 */
		err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to acquire NVM semaphore");
			kvfree(nvm_data);
			return -EBUSY;
		}

		err = ixgbe_read_flat_nvm(hw, i * IXGBE_DEVLINK_READ_BLK_SIZE,
					  &read_sz, buf, read_shadow_ram);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to read RAM content");
			ixgbe_release_nvm(hw);
			kvfree(nvm_data);
			return -EIO;
		}

		ixgbe_release_nvm(hw);

		buf += read_sz;
		left -= read_sz;
	}

	*data = nvm_data;

	return 0;
}
/**
 * ixgbe_devlink_devcaps_snapshot - Capture a snapshot of device capabilities
 * @devlink: the devlink instance
 * @ops: the devlink region being snapshotted
 * @extack: extended ACK response structure
 * @data: on exit points to snapshot data buffer
 *
 * This function is called in response to the DEVLINK_CMD_REGION_NEW for
 * the device-caps devlink region.
 *
 * Capture a snapshot of the device capabilities reported by firmware.
 *
 * No need to worry about freeing @data; the devlink core takes care of it.
 *
 * Return: 0 on success, -ENOMEM when cannot alloc mem, or return code of
 * the reading operation.
 */
static int ixgbe_devlink_devcaps_snapshot(struct devlink *devlink,
					  const struct devlink_region_ops *ops,
					  struct netlink_ext_ack *extack,
					  u8 **data)
{
	struct ixgbe_adapter *adapter = devlink_priv(devlink);
	struct ixgbe_aci_cmd_list_caps_elem *caps;
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	caps = kvzalloc(IXGBE_ACI_MAX_BUFFER_SIZE, GFP_KERNEL);
	if (!caps)
		return -ENOMEM;

	err = ixgbe_aci_list_caps(hw, caps, IXGBE_ACI_MAX_BUFFER_SIZE, NULL,
				  ixgbe_aci_opc_list_dev_caps);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed to read device capabilities");
		kvfree(caps);
		return err;
	}

	/* Hand the raw capabilities buffer to devlink as the snapshot */
	*data = (u8 *)caps;

	return 0;
}
/**
 * ixgbe_devlink_nvm_read - Read a portion of NVM flash content
 * @devlink: the devlink instance
 * @ops: the devlink region to snapshot
 * @extack: extended ACK response structure
 * @offset: the offset to start at
 * @size: the amount to read
 * @data: the data buffer to read into
 *
 * This function is called in response to DEVLINK_CMD_REGION_READ to directly
 * read a section of the NVM contents.
 *
 * Read from either the nvm-flash region or the shadow-ram region.
 *
 * Return: 0 on success, -EOPNOTSUPP for unsupported regions, -EBUSY when
 * cannot lock NVM, -ERANGE when buffer limit exceeded and -EIO when error
 * occurs during reading.
 */
static int ixgbe_devlink_nvm_read(struct devlink *devlink,
				  const struct devlink_region_ops *ops,
				  struct netlink_ext_ack *extack,
				  u64 offset, u32 size, u8 *data)
{
	struct ixgbe_adapter *adapter = devlink_priv(devlink);
	struct ixgbe_hw *hw = &adapter->hw;
	bool read_shadow_ram;
	u32 nvm_size;
	int err;

	err = ixgbe_devlink_parse_region(hw, ops, &read_shadow_ram, &nvm_size);
	if (err)
		return err;

	/* Reject reads that would run past the end of the region */
	if (offset + size > nvm_size) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot read beyond the region size");
		return -ERANGE;
	}

	err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore");
		return -EBUSY;
	}

	err = ixgbe_read_flat_nvm(hw, (u32)offset, &size, data, read_shadow_ram);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to read NVM contents");
		ixgbe_release_nvm(hw);
		return -EIO;
	}

	ixgbe_release_nvm(hw);

	return 0;
}
/* nvm-flash: direct read and snapshot access to the whole flash chip */
static const struct devlink_region_ops ixgbe_nvm_region_ops = {
	.name = "nvm-flash",
	.destructor = kvfree,
	.snapshot = ixgbe_devlink_nvm_snapshot,
	.read = ixgbe_devlink_nvm_read,
};

/* shadow-ram: direct read and snapshot access to the Shadow RAM copy */
static const struct devlink_region_ops ixgbe_sram_region_ops = {
	.name = "shadow-ram",
	.destructor = kvfree,
	.snapshot = ixgbe_devlink_nvm_snapshot,
	.read = ixgbe_devlink_nvm_read,
};

/* device-caps: snapshot-only access to the firmware capabilities buffer */
static const struct devlink_region_ops ixgbe_devcaps_region_ops = {
	.name = "device-caps",
	.destructor = kvfree,
	.snapshot = ixgbe_devlink_devcaps_snapshot,
};
/**
 * ixgbe_devlink_init_regions - Initialize devlink regions
 * @adapter: adapter instance
 *
 * Create devlink regions used to enable access to dump the contents of the
 * flash memory of the device.
 *
 * Regions are only created for E610 devices. Failure to create any single
 * region is non-fatal: the error is logged, the region pointer is reset to
 * NULL, and the driver continues without that region.
 */
void ixgbe_devlink_init_regions(struct ixgbe_adapter *adapter)
{
	struct devlink *devlink = adapter->devlink;
	struct device *dev = &adapter->pdev->dev;
	u64 nvm_size, sram_size;

	if (adapter->hw.mac.type != ixgbe_mac_e610)
		return;

	nvm_size = adapter->hw.flash.flash_size;
	/* Single snapshot for the full flash */
	adapter->nvm_region = devl_region_create(devlink, &ixgbe_nvm_region_ops,
						 1, nvm_size);
	if (IS_ERR(adapter->nvm_region)) {
		dev_err(dev,
			"Failed to create NVM devlink region, err %ld\n",
			PTR_ERR(adapter->nvm_region));
		adapter->nvm_region = NULL;
	}

	/* Shadow RAM size is in 16-bit words; convert to bytes */
	sram_size = adapter->hw.flash.sr_words * 2u;
	adapter->sram_region = devl_region_create(devlink, &ixgbe_sram_region_ops,
						  1, sram_size);
	if (IS_ERR(adapter->sram_region)) {
		dev_err(dev,
			"Failed to create shadow-ram devlink region, err %ld\n",
			PTR_ERR(adapter->sram_region));
		adapter->sram_region = NULL;
	}

	/* device-caps allows up to 10 snapshots of the capabilities buffer */
	adapter->devcaps_region = devl_region_create(devlink,
						     &ixgbe_devcaps_region_ops,
						     10, IXGBE_ACI_MAX_BUFFER_SIZE);
	if (IS_ERR(adapter->devcaps_region)) {
		dev_err(dev,
			"Failed to create device-caps devlink region, err %ld\n",
			PTR_ERR(adapter->devcaps_region));
		adapter->devcaps_region = NULL;
	}
}
/**
 * ixgbe_devlink_destroy_regions - Destroy devlink regions
 * @adapter: adapter instance
 *
 * Remove previously created regions for this adapter instance.
 *
 * Each pointer is checked because region creation is allowed to fail in
 * ixgbe_devlink_init_regions(), which leaves the pointer NULL.
 */
void ixgbe_devlink_destroy_regions(struct ixgbe_adapter *adapter)
{
	if (adapter->hw.mac.type != ixgbe_mac_e610)
		return;

	if (adapter->nvm_region)
		devl_region_destroy(adapter->nvm_region);

	if (adapter->sram_region)
		devl_region_destroy(adapter->sram_region);

	if (adapter->devcaps_region)
		devl_region_destroy(adapter->devcaps_region);
}
+3
View File
@@ -616,6 +616,9 @@ struct ixgbe_adapter {
struct mii_bus *mii_bus;
struct devlink *devlink;
struct devlink_port devlink_port;
struct devlink_region *nvm_region;
struct devlink_region *sram_region;
struct devlink_region *devcaps_region;
unsigned long state;
@@ -1484,6 +1484,35 @@ static int ixgbe_start_hw_e610(struct ixgbe_hw *hw)
return 0;
}
/**
 * ixgbe_aci_set_port_id_led - set LED value for the given port
 * @hw: pointer to the HW struct
 * @orig_mode: set LED original mode
 *
 * Set LED value for the given port (0x06E9). When @orig_mode is true the
 * LED is returned to its original identification mode; otherwise it is set
 * to blink mode for port identification.
 *
 * Return: the exit code of the operation.
 */
int ixgbe_aci_set_port_id_led(struct ixgbe_hw *hw, bool orig_mode)
{
	struct ixgbe_aci_cmd_set_port_id_led *cmd;
	struct ixgbe_aci_desc desc;

	cmd = &desc.params.set_port_id_led;

	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_port_id_led);

	cmd->lport_num = (u8)hw->bus.func;
	cmd->lport_num_valid = IXGBE_ACI_PORT_ID_PORT_NUM_VALID;

	if (orig_mode)
		cmd->ident_mode = IXGBE_ACI_PORT_IDENT_LED_ORIG;
	else
		cmd->ident_mode = IXGBE_ACI_PORT_IDENT_LED_BLINK;

	return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
}
/**
* ixgbe_get_media_type_e610 - Gets media type
* @hw: pointer to the HW struct
@@ -36,6 +36,7 @@ int ixgbe_aci_get_link_info(struct ixgbe_hw *hw, bool ena_lse,
struct ixgbe_link_status *link);
int ixgbe_aci_set_event_mask(struct ixgbe_hw *hw, u8 port_num, u16 mask);
int ixgbe_configure_lse(struct ixgbe_hw *hw, bool activate, u16 mask);
int ixgbe_aci_set_port_id_led(struct ixgbe_hw *hw, bool orig_mode);
enum ixgbe_media_type ixgbe_get_media_type_e610(struct ixgbe_hw *hw);
int ixgbe_setup_link_e610(struct ixgbe_hw *hw, ixgbe_link_speed speed,
bool autoneg_wait);
@@ -564,6 +564,22 @@ static void ixgbe_get_pauseparam(struct net_device *netdev,
}
}
/**
 * ixgbe_set_pauseparam_finalize - apply a new flow control configuration
 * @netdev: network interface device structure
 * @fc: requested flow control settings
 *
 * If @fc differs from the currently stored HW flow control configuration,
 * save it and reinitialize (when running) or reset the device so the new
 * settings take effect.
 */
static void ixgbe_set_pauseparam_finalize(struct net_device *netdev,
					  struct ixgbe_fc_info *fc)
{
	struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	/* If the thing changed then we'll update and use new autoneg. */
	if (memcmp(fc, &hw->fc, sizeof(*fc))) {
		hw->fc = *fc;
		if (netif_running(netdev))
			ixgbe_reinit_locked(adapter);
		else
			ixgbe_reset(adapter);
	}
}
static int ixgbe_set_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
@@ -592,15 +608,40 @@ static int ixgbe_set_pauseparam(struct net_device *netdev,
else
fc.requested_mode = ixgbe_fc_none;
/* if the thing changed then we'll update and use new autoneg */
if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) {
hw->fc = fc;
if (netif_running(netdev))
ixgbe_reinit_locked(adapter);
else
ixgbe_reset(adapter);
ixgbe_set_pauseparam_finalize(netdev, &fc);
return 0;
}
static int ixgbe_set_pauseparam_e610(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
struct ixgbe_fc_info fc = hw->fc;
if (!ixgbe_device_supports_autoneg_fc(hw))
return -EOPNOTSUPP;
if (pause->autoneg == AUTONEG_DISABLE) {
netdev_info(netdev,
"Cannot disable autonegotiation on this device.\n");
return -EOPNOTSUPP;
}
fc.disable_fc_autoneg = false;
if (pause->rx_pause && pause->tx_pause)
fc.requested_mode = ixgbe_fc_full;
else if (pause->rx_pause)
fc.requested_mode = ixgbe_fc_rx_pause;
else if (pause->tx_pause)
fc.requested_mode = ixgbe_fc_tx_pause;
else
fc.requested_mode = ixgbe_fc_none;
ixgbe_set_pauseparam_finalize(netdev, &fc);
return 0;
}
@@ -2365,6 +2406,50 @@ static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
return 0;
}
/**
 * ixgbe_set_wol_acpi - configure ACPI-style Wake-on-LAN filters
 * @netdev: network interface device structure
 * @wol: requested Wake-on-LAN options
 *
 * Disable APM wakeup, clear existing wake filters, then program unicast/
 * multicast/broadcast wake filters from @wol and enable PME.
 *
 * Return: 0 on success, -EOPNOTSUPP when WoL is excluded for this device.
 */
static int ixgbe_set_wol_acpi(struct net_device *netdev,
			      struct ethtool_wolinfo *wol)
{
	struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 grc;

	if (ixgbe_wol_exclusion(adapter, wol))
		return wol->wolopts ? -EOPNOTSUPP : 0;

	/* disable APM wakeup */
	grc = IXGBE_READ_REG(hw, IXGBE_GRC_X550EM_a);
	grc &= ~IXGBE_GRC_APME;
	IXGBE_WRITE_REG(hw, IXGBE_GRC_X550EM_a, grc);

	/* erase existing filters */
	IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
	adapter->wol = 0;

	if (wol->wolopts & WAKE_UCAST)
		adapter->wol |= IXGBE_WUFC_EX;
	if (wol->wolopts & WAKE_MCAST)
		adapter->wol |= IXGBE_WUFC_MC;
	if (wol->wolopts & WAKE_BCAST)
		adapter->wol |= IXGBE_WUFC_BC;

	/* enable PME and latch the new wake filter configuration */
	IXGBE_WRITE_REG(hw, IXGBE_WUC, IXGBE_WUC_PME_EN);
	IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wol);

	hw->wol_enabled = adapter->wol;
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	return 0;
}
/* E610 WoL: unicast/multicast/broadcast wake requests go through the ACPI
 * path; everything else falls back to the common ixgbe WoL handling.
 */
static int ixgbe_set_wol_e610(struct net_device *netdev,
			      struct ethtool_wolinfo *wol)
{
	if (wol->wolopts & (WAKE_UCAST | WAKE_MCAST | WAKE_BCAST))
		return ixgbe_set_wol_acpi(netdev, wol);
	else
		return ixgbe_set_wol(netdev, wol);
}
static int ixgbe_nway_reset(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
@@ -2406,6 +2491,26 @@ static int ixgbe_set_phys_id(struct net_device *netdev,
return 0;
}
/**
 * ixgbe_set_phys_id_e610 - blink the port LED for identification (E610)
 * @netdev: network interface device structure
 * @state: requested LED identification state
 *
 * ETHTOOL_ID_ACTIVE switches the LED to blink mode; ETHTOOL_ID_INACTIVE
 * restores the LED's original mode (hence the inverted @led_active passed
 * to ixgbe_aci_set_port_id_led()).
 *
 * Return: 0 on success, -EOPNOTSUPP for unsupported states, or the ACI
 * command result.
 */
static int ixgbe_set_phys_id_e610(struct net_device *netdev,
				  enum ethtool_phys_id_state state)
{
	struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
	bool led_active;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		led_active = true;
		break;
	case ETHTOOL_ID_INACTIVE:
		led_active = false;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return ixgbe_aci_set_port_id_led(&adapter->hw, !led_active);
}
static int ixgbe_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec,
struct kernel_ethtool_coalesce *kernel_coal,
@@ -3650,7 +3755,57 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
.set_link_ksettings = ixgbe_set_link_ksettings,
};
/* ethtool operations for E610 devices. Mirrors the generic ixgbe ops
 * except for the E610-specific WoL (.set_wol), pause-parameter
 * (.set_pauseparam) and LED-identify (.set_phys_id) callbacks.
 */
static const struct ethtool_ops ixgbe_ethtool_ops_e610 = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
.get_drvinfo = ixgbe_get_drvinfo,
.get_regs_len = ixgbe_get_regs_len,
.get_regs = ixgbe_get_regs,
.get_wol = ixgbe_get_wol,
.set_wol = ixgbe_set_wol_e610,
.nway_reset = ixgbe_nway_reset,
.get_link = ethtool_op_get_link,
.get_eeprom_len = ixgbe_get_eeprom_len,
.get_eeprom = ixgbe_get_eeprom,
.set_eeprom = ixgbe_set_eeprom,
.get_ringparam = ixgbe_get_ringparam,
.set_ringparam = ixgbe_set_ringparam,
.get_pause_stats = ixgbe_get_pause_stats,
.get_pauseparam = ixgbe_get_pauseparam,
.set_pauseparam = ixgbe_set_pauseparam_e610,
.get_msglevel = ixgbe_get_msglevel,
.set_msglevel = ixgbe_set_msglevel,
.self_test = ixgbe_diag_test,
.get_strings = ixgbe_get_strings,
.set_phys_id = ixgbe_set_phys_id_e610,
.get_sset_count = ixgbe_get_sset_count,
.get_ethtool_stats = ixgbe_get_ethtool_stats,
.get_coalesce = ixgbe_get_coalesce,
.set_coalesce = ixgbe_set_coalesce,
.get_rxnfc = ixgbe_get_rxnfc,
.set_rxnfc = ixgbe_set_rxnfc,
.get_rxfh_indir_size = ixgbe_rss_indir_size,
.get_rxfh_key_size = ixgbe_get_rxfh_key_size,
.get_rxfh = ixgbe_get_rxfh,
.set_rxfh = ixgbe_set_rxfh,
.get_eee = ixgbe_get_eee,
.set_eee = ixgbe_set_eee,
.get_channels = ixgbe_get_channels,
.set_channels = ixgbe_set_channels,
.get_priv_flags = ixgbe_get_priv_flags,
.set_priv_flags = ixgbe_set_priv_flags,
.get_ts_info = ixgbe_get_ts_info,
.get_module_info = ixgbe_get_module_info,
.get_module_eeprom = ixgbe_get_module_eeprom,
.get_link_ksettings = ixgbe_get_link_ksettings,
.set_link_ksettings = ixgbe_set_link_ksettings,
};
/**
 * ixgbe_set_ethtool_ops - install the ethtool ops for a netdev
 * @netdev: network interface whose ethtool_ops pointer is set
 *
 * E610 MACs get the E610-specific ops table; all other MAC types use
 * the generic ixgbe ethtool ops.
 */
void ixgbe_set_ethtool_ops(struct net_device *netdev)
{
	/* Declaration must precede statements; the former unconditional
	 * assignment before the conditional was a dead store and is gone.
	 */
	struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);

	if (adapter->hw.mac.type == ixgbe_mac_e610)
		netdev->ethtool_ops = &ixgbe_ethtool_ops_e610;
	else
		netdev->ethtool_ops = &ixgbe_ethtool_ops;
}
@@ -11317,6 +11317,7 @@ static int ixgbe_recovery_probe(struct ixgbe_adapter *adapter)
ixgbe_devlink_register_port(adapter);
SET_NETDEV_DEVLINK_PORT(adapter->netdev,
&adapter->devlink_port);
ixgbe_devlink_init_regions(adapter);
devl_register(adapter->devlink);
devl_unlock(adapter->devlink);
@@ -11433,11 +11434,6 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_ioremap;
}
netdev->netdev_ops = &ixgbe_netdev_ops;
ixgbe_set_ethtool_ops(netdev);
netdev->watchdog_timeo = 5 * HZ;
strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
/* Setup hw api */
hw->mac.ops = *ii->mac_ops;
hw->mac.type = ii->mac;
@@ -11467,6 +11463,11 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->phy.mdio.mdio_read = ixgbe_mdio_read;
hw->phy.mdio.mdio_write = ixgbe_mdio_write;
netdev->netdev_ops = &ixgbe_netdev_ops;
ixgbe_set_ethtool_ops(netdev);
netdev->watchdog_timeo = 5 * HZ;
strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
/* setup the private structure */
err = ixgbe_sw_init(adapter, ii);
if (err)
@@ -11824,6 +11825,7 @@ skip_sriov:
if (err)
goto err_netdev;
ixgbe_devlink_init_regions(adapter);
devl_register(adapter->devlink);
devl_unlock(adapter->devlink);
return 0;
@@ -11882,6 +11884,7 @@ static void ixgbe_remove(struct pci_dev *pdev)
netdev = adapter->netdev;
devl_lock(adapter->devlink);
devl_unregister(adapter->devlink);
ixgbe_devlink_destroy_regions(adapter);
ixgbe_dbg_adapter_exit(adapter);
set_bit(__IXGBE_REMOVING, &adapter->state);
@@ -223,6 +223,7 @@ enum ixgbe_aci_opc {
ixgbe_aci_opc_write_mdio = 0x06E5,
ixgbe_aci_opc_set_gpio_by_func = 0x06E6,
ixgbe_aci_opc_get_gpio_by_func = 0x06E7,
ixgbe_aci_opc_set_port_id_led = 0x06E9,
ixgbe_aci_opc_set_gpio = 0x06EC,
ixgbe_aci_opc_get_gpio = 0x06ED,
ixgbe_aci_opc_sff_eeprom = 0x06EE,
@@ -808,6 +809,18 @@ struct ixgbe_aci_cmd_get_link_topo_pin {
u8 rsvd[7];
};
/* Set Port Identification LED (direct, 0x06E9) */
struct ixgbe_aci_cmd_set_port_id_led {
u8 lport_num;       /* logical port number the command targets */
u8 lport_num_valid; /* IXGBE_ACI_PORT_ID_PORT_NUM_VALID when lport_num is set */
u8 ident_mode;      /* IXGBE_ACI_PORT_IDENT_LED_ORIG or _BLINK */
u8 rsvd[13];        /* reserved, pads command to 16 bytes */
};
/* lport_num_valid flag */
#define IXGBE_ACI_PORT_ID_PORT_NUM_VALID BIT(0)
/* ident_mode values: restore normal LED behavior vs. identify blink */
#define IXGBE_ACI_PORT_IDENT_LED_ORIG 0
#define IXGBE_ACI_PORT_IDENT_LED_BLINK BIT(0)
/* Read/Write SFF EEPROM command (indirect 0x06EE) */
struct ixgbe_aci_cmd_sff_eeprom {
u8 lport_num;
@@ -985,6 +998,7 @@ struct ixgbe_aci_desc {
struct ixgbe_aci_cmd_restart_an restart_an;
struct ixgbe_aci_cmd_get_link_status get_link_status;
struct ixgbe_aci_cmd_set_event_mask set_event_mask;
struct ixgbe_aci_cmd_set_port_id_led set_port_id_led;
struct ixgbe_aci_cmd_get_link_topo get_link_topo;
struct ixgbe_aci_cmd_get_link_topo_pin get_link_topo_pin;
struct ixgbe_aci_cmd_sff_eeprom read_write_sff_param;