From patchwork Mon Oct  9 14:44:39 2017
X-Patchwork-Submitter: Nélio Laranjeiro
X-Patchwork-Id: 29958
From: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
To: dev@dpdk.org
Cc: adrien.mazarguil@6wind.com, yskoh@mellanox.com, ferruh.yigit@intel.com
Date: Mon, 9 Oct 2017 16:44:39 +0200
Message-Id: <8a0c9aed26380d4a7583cda460a752f742188656.1507560012.git.nelio.laranjeiro@6wind.com>
Subject: [dpdk-dev] [PATCH v3 03/30] net/mlx5: prefix Rx structures and functions

Prefix struct rxq_ctrl and associated functions with mlx5.
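
The rename is purely mechanical: the data-path structure (now struct
mlx5_rxq_data) stays embedded inside the control structure (now struct
mlx5_rxq_ctrl), and control-path code still recovers the latter from the
former through container_of(). For readers new to the driver, a minimal
sketch of that embedding idiom follows; the field lists are simplified
placeholders rather than the real definitions in mlx5_rxtx.h, the helper
name mlx5_rxq_ctrl_of() is made up for illustration, and DPDK already
ships container_of() in rte_common.h:

	#include <stddef.h>

	/* Hot, data-path-only state (simplified placeholder fields). */
	struct mlx5_rxq_data {
		unsigned int elts_n;
	};

	/* Control-path resources wrapping the data path (simplified). */
	struct mlx5_rxq_ctrl {
		void *cq;                 /* Control-path resources. */
		struct mlx5_rxq_data rxq; /* Embedded data-path state. */
	};

	/* Same idiom as DPDK's container_of() in rte_common.h,
	 * repeated here so the sketch is self-contained. */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	/* The Rx burst functions only ever see a struct mlx5_rxq_data *;
	 * control code maps it back to the owning control structure. */
	static inline struct mlx5_rxq_ctrl *
	mlx5_rxq_ctrl_of(struct mlx5_rxq_data *rxq)
	{
		return container_of(rxq, struct mlx5_rxq_ctrl, rxq);
	}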
Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
Acked-by: Yongseok Koh <yskoh@mellanox.com>
---
 drivers/net/mlx5/mlx5.c              |  8 ++---
 drivers/net/mlx5/mlx5.h              |  4 +--
 drivers/net/mlx5/mlx5_flow.c         | 12 ++++----
 drivers/net/mlx5/mlx5_rxq.c          | 57 +++++++++++++++++++-----------------
 drivers/net/mlx5/mlx5_rxtx.c         | 14 ++++-----
 drivers/net/mlx5/mlx5_rxtx.h         | 10 +++----
 drivers/net/mlx5/mlx5_rxtx_vec_sse.c | 23 ++++++++-------
 drivers/net/mlx5/mlx5_stats.c        |  2 +-
 drivers/net/mlx5/mlx5_vlan.c         |  5 ++--
 9 files changed, 70 insertions(+), 65 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 64b16e5..40499b1 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -208,14 +208,14 @@ mlx5_dev_close(struct rte_eth_dev *dev)
 	/* XXX race condition if mlx5_rx_burst() is still running. */
 	usleep(1000);
 	for (i = 0; (i != priv->rxqs_n); ++i) {
-		struct rxq *rxq = (*priv->rxqs)[i];
-		struct rxq_ctrl *rxq_ctrl;
+		struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
+		struct mlx5_rxq_ctrl *rxq_ctrl;

 		if (rxq == NULL)
 			continue;
-		rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
+		rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
 		(*priv->rxqs)[i] = NULL;
-		rxq_cleanup(rxq_ctrl);
+		mlx5_rxq_cleanup(rxq_ctrl);
 		rte_free(rxq_ctrl);
 	}
 	priv->rxqs_n = 0;
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index adac5f4..ddaf227 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -130,7 +130,7 @@ struct priv {
 	/* RX/TX queues. */
 	unsigned int rxqs_n; /* RX queues array size. */
 	unsigned int txqs_n; /* TX queues array size. */
-	struct rxq *(*rxqs)[]; /* RX queues. */
+	struct mlx5_rxq_data *(*rxqs)[]; /* RX queues. */
 	struct txq *(*txqs)[]; /* TX queues. */
 	/* Indirection tables referencing all RX WQs. */
 	struct ibv_rwq_ind_table *(*ind_tables)[];
@@ -290,7 +290,7 @@ int mlx5_flow_flush(struct rte_eth_dev *, struct rte_flow_error *);
 int mlx5_flow_isolate(struct rte_eth_dev *, int, struct rte_flow_error *);
 int priv_flow_start(struct priv *);
 void priv_flow_stop(struct priv *);
-int priv_flow_rxq_in_use(struct priv *, struct rxq *);
+int priv_flow_rxq_in_use(struct priv *, struct mlx5_rxq_data *);

 /* mlx5_socket.c */
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 266ae24..99dbd8c 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -99,7 +99,7 @@ struct rte_flow {
 	uint32_t mark:1; /**< Set if the flow is marked. */
 	uint32_t drop:1; /**< Drop queue. */
 	uint64_t hash_fields; /**< Fields that participate in the hash. */
-	struct rxq *rxqs[]; /**< Pointer to the queues array. */
+	struct mlx5_rxq_data *rxqs[]; /**< Pointer to the queues array. */
 };

 /** Static initializer for items. */
@@ -1105,10 +1105,10 @@ priv_flow_create_action_queue(struct priv *priv,
 		return NULL;
 	}
 	for (i = 0; i < flow->actions.queues_n; ++i) {
-		struct rxq_ctrl *rxq;
+		struct mlx5_rxq_ctrl *rxq;

 		rxq = container_of((*priv->rxqs)[flow->actions.queues[i]],
-				   struct rxq_ctrl, rxq);
+				   struct mlx5_rxq_ctrl, rxq);
 		wqs[i] = rxq->wq;
 		rte_flow->rxqs[i] = &rxq->rxq;
 		++rte_flow->rxqs_n;
@@ -1301,7 +1301,7 @@ priv_flow_destroy(struct priv *priv,
 		claim_zero(ibv_destroy_rwq_ind_table(flow->ind_table));
 	if (flow->mark) {
 		struct rte_flow *tmp;
-		struct rxq *rxq;
+		struct mlx5_rxq_data *rxq;
 		uint32_t mark_n = 0;
 		uint32_t queue_n;

@@ -1321,7 +1321,7 @@ priv_flow_destroy(struct priv *priv,
 				for (tqueue_n = 0;
 				     tqueue_n < tmp->rxqs_n;
 				     ++tqueue_n) {
-					struct rxq *trxq;
+					struct mlx5_rxq_data *trxq;

 					trxq = tmp->rxqs[tqueue_n];
 					if (rxq == trxq)
@@ -1585,7 +1585,7 @@ priv_flow_start(struct priv *priv)
  *   Nonzero if the queue is used by a flow.
  */
 int
-priv_flow_rxq_in_use(struct priv *priv, struct rxq *rxq)
+priv_flow_rxq_in_use(struct priv *priv, struct mlx5_rxq_data *rxq)
 {
 	struct rte_flow *flow;

diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 5819e92..6e520fb 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -374,10 +374,10 @@ priv_create_hash_rxqs(struct priv *priv)
 		      priv->reta_idx_n);
 	}
 	for (i = 0; (i != priv->reta_idx_n); ++i) {
-		struct rxq_ctrl *rxq_ctrl;
+		struct mlx5_rxq_ctrl *rxq_ctrl;

 		rxq_ctrl = container_of((*priv->rxqs)[(*priv->reta_idx)[i]],
-					struct rxq_ctrl, rxq);
+					struct mlx5_rxq_ctrl, rxq);
 		wqs[i] = rxq_ctrl->wq;
 	}
 	/* Get number of hash RX queues to configure. */
@@ -636,7 +636,7 @@ priv_rehash_flows(struct priv *priv)
  *   0 on success, errno value on failure.
  */
 static int
-rxq_alloc_elts(struct rxq_ctrl *rxq_ctrl, unsigned int elts_n)
+rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl, unsigned int elts_n)
 {
 	const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n;
 	unsigned int i;
@@ -678,7 +678,7 @@ rxq_alloc_elts(struct rxq_ctrl *rxq_ctrl, unsigned int elts_n)
 		(*rxq_ctrl->rxq.elts)[i] = buf;
 	}
 	if (rxq_check_vec_support(&rxq_ctrl->rxq) > 0) {
-		struct rxq *rxq = &rxq_ctrl->rxq;
+		struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
 		struct rte_mbuf *mbuf_init = &rxq->fake_mbuf;

 		assert(rxq->elts_n == rxq->cqe_n);
@@ -720,9 +720,9 @@ rxq_alloc_elts(struct rxq_ctrl *rxq_ctrl, unsigned int elts_n)
  *   Pointer to RX queue structure.
  */
 static void
-rxq_free_elts(struct rxq_ctrl *rxq_ctrl)
+rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
 {
-	struct rxq *rxq = &rxq_ctrl->rxq;
+	struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
 	const uint16_t q_n = (1 << rxq->elts_n);
 	const uint16_t q_mask = q_n - 1;
 	uint16_t used = q_n - (rxq->rq_ci - rxq->rq_pi);
@@ -756,7 +756,7 @@ rxq_free_elts(struct rxq_ctrl *rxq_ctrl)
  *   Pointer to RX queue structure.
  */
 void
-rxq_cleanup(struct rxq_ctrl *rxq_ctrl)
+mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl)
 {
 	DEBUG("cleaning up %p", (void *)rxq_ctrl);
 	rxq_free_elts(rxq_ctrl);
@@ -781,7 +781,7 @@ rxq_cleanup(struct rxq_ctrl *rxq_ctrl)
  *   0 on success, errno value on failure.
  */
 static inline int
-rxq_setup(struct rxq_ctrl *tmpl)
+rxq_setup(struct mlx5_rxq_ctrl *tmpl)
 {
 	struct ibv_cq *ibcq = tmpl->cq;
 	struct mlx5dv_cq cq_info;
@@ -848,12 +848,12 @@ rxq_setup(struct rxq_ctrl *tmpl)
  *   0 on success, errno value on failure.
  */
 static int
-rxq_ctrl_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl,
+rxq_ctrl_setup(struct rte_eth_dev *dev, struct mlx5_rxq_ctrl *rxq_ctrl,
 	       uint16_t desc, unsigned int socket,
 	       const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
 {
 	struct priv *priv = dev->data->dev_private;
-	struct rxq_ctrl tmpl = {
+	struct mlx5_rxq_ctrl tmpl = {
 		.priv = priv,
 		.socket = socket,
 		.rxq = {
@@ -1072,7 +1072,7 @@ rxq_ctrl_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl,
 	}
 	/* Clean up rxq in case we're reinitializing it. */
 	DEBUG("%p: cleaning-up old rxq just in case", (void *)rxq_ctrl);
-	rxq_cleanup(rxq_ctrl);
+	mlx5_rxq_cleanup(rxq_ctrl);
 	/* Move mbuf pointers to dedicated storage area in RX queue. */
 	elts = (void *)(rxq_ctrl + 1);
 	rte_memcpy(elts, tmpl.rxq.elts, sizeof(*elts));
@@ -1091,7 +1091,7 @@ rxq_ctrl_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl,
 	return 0;
 error:
 	elts = tmpl.rxq.elts;
-	rxq_cleanup(&tmpl);
+	mlx5_rxq_cleanup(&tmpl);
 	rte_free(elts);
 	assert(ret > 0);
 	return ret;
@@ -1122,8 +1122,9 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		    struct rte_mempool *mp)
 {
 	struct priv *priv = dev->data->dev_private;
-	struct rxq *rxq = (*priv->rxqs)[idx];
-	struct rxq_ctrl *rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
+	struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
+	struct mlx5_rxq_ctrl *rxq_ctrl =
+		container_of(rxq, struct mlx5_rxq_ctrl, rxq);
 	const uint16_t desc_n =
 		desc + priv->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
 	int ret;
@@ -1154,7 +1155,7 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 			return -EEXIST;
 		}
 		(*priv->rxqs)[idx] = NULL;
-		rxq_cleanup(rxq_ctrl);
+		mlx5_rxq_cleanup(rxq_ctrl);
 		/* Resize if rxq size is changed. */
 		if (rxq_ctrl->rxq.elts_n != log2above(desc)) {
 			rxq_ctrl = rte_realloc(rxq_ctrl,
@@ -1202,8 +1203,8 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 void
 mlx5_rx_queue_release(void *dpdk_rxq)
 {
-	struct rxq *rxq = (struct rxq *)dpdk_rxq;
-	struct rxq_ctrl *rxq_ctrl;
+	struct mlx5_rxq_data *rxq = (struct mlx5_rxq_data *)dpdk_rxq;
+	struct mlx5_rxq_ctrl *rxq_ctrl;
 	struct priv *priv;
 	unsigned int i;

@@ -1212,7 +1213,7 @@ mlx5_rx_queue_release(void *dpdk_rxq)
 	if (rxq == NULL)
 		return;
-	rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
+	rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
 	priv = rxq_ctrl->priv;
 	priv_lock(priv);
 	if (priv_flow_rxq_in_use(priv, rxq))
@@ -1225,7 +1226,7 @@ mlx5_rx_queue_release(void *dpdk_rxq)
 			(*priv->rxqs)[i] = NULL;
 			break;
 		}
-	rxq_cleanup(rxq_ctrl);
+	mlx5_rxq_cleanup(rxq_ctrl);
 	rte_free(rxq_ctrl);
 	priv_unlock(priv);
 }
@@ -1260,9 +1261,9 @@ priv_rx_intr_vec_enable(struct priv *priv)
 	}
 	intr_handle->type = RTE_INTR_HANDLE_EXT;
 	for (i = 0; i != n; ++i) {
-		struct rxq *rxq = (*priv->rxqs)[i];
-		struct rxq_ctrl *rxq_ctrl =
-			container_of(rxq, struct rxq_ctrl, rxq);
+		struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
+		struct mlx5_rxq_ctrl *rxq_ctrl =
+			container_of(rxq, struct mlx5_rxq_ctrl, rxq);
 		int fd;
 		int flags;
 		int rc;
@@ -1328,7 +1329,7 @@ priv_rx_intr_vec_disable(struct priv *priv)
  *   Sequence number per receive queue .
  */
 static inline void
-mlx5_arm_cq(struct rxq *rxq, int sq_n_rxq)
+mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq)
 {
 	int sq_n = 0;
 	uint32_t doorbell_hi;
@@ -1359,8 +1360,9 @@ int
 mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
 	struct priv *priv = mlx5_get_priv(dev);
-	struct rxq *rxq = (*priv->rxqs)[rx_queue_id];
-	struct rxq_ctrl *rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
+	struct mlx5_rxq_data *rxq = (*priv->rxqs)[rx_queue_id];
+	struct mlx5_rxq_ctrl *rxq_ctrl =
+		container_of(rxq, struct mlx5_rxq_ctrl, rxq);
 	int ret = 0;

 	if (!rxq || !rxq_ctrl->channel) {
@@ -1388,8 +1390,9 @@ int
 mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
 	struct priv *priv = mlx5_get_priv(dev);
-	struct rxq *rxq = (*priv->rxqs)[rx_queue_id];
-	struct rxq_ctrl *rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
+	struct mlx5_rxq_data *rxq = (*priv->rxqs)[rx_queue_id];
+	struct mlx5_rxq_ctrl *rxq_ctrl =
+		container_of(rxq, struct mlx5_rxq_ctrl, rxq);
 	struct ibv_cq *ev_cq;
 	void *ev_ctx;
 	int ret;
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index c45ebee..ad1071b 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -65,11 +65,11 @@ static __rte_always_inline uint32_t
 rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe);

 static __rte_always_inline int
-mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe,
+mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
 		 uint16_t cqe_cnt, uint32_t *rss_hash);

 static __rte_always_inline uint32_t
-rxq_cq_to_ol_flags(struct rxq *rxq, volatile struct mlx5_cqe *cqe);
+rxq_cq_to_ol_flags(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe);

 uint32_t mlx5_ptype_table[] __rte_cache_aligned = {
 	[0xff] = RTE_PTYPE_ALL_MASK, /* Last entry for errored packet. */
@@ -282,7 +282,7 @@ mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset)
 int
 mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
 {
-	struct rxq *rxq = rx_queue;
+	struct mlx5_rxq_data *rxq = rx_queue;
 	struct rxq_zip *zip = &rxq->zip;
 	volatile struct mlx5_cqe *cqe;
 	const unsigned int cqe_n = (1 << rxq->cqe_n);
@@ -1647,7 +1647,7 @@ rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe)
  *   with error.
  */
 static inline int
-mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe,
+mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
 		 uint16_t cqe_cnt, uint32_t *rss_hash)
 {
 	struct rxq_zip *zip = &rxq->zip;
@@ -1758,7 +1758,7 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe,
  *   Offload flags (ol_flags) for struct rte_mbuf.
  */
 static inline uint32_t
-rxq_cq_to_ol_flags(struct rxq *rxq, volatile struct mlx5_cqe *cqe)
+rxq_cq_to_ol_flags(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe)
 {
 	uint32_t ol_flags = 0;
 	uint16_t flags = rte_be_to_cpu_16(cqe->hdr_type_etc);
@@ -1797,7 +1797,7 @@ rxq_cq_to_ol_flags(struct rxq *rxq, volatile struct mlx5_cqe *cqe)
 uint16_t
 mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 {
-	struct rxq *rxq = dpdk_rxq;
+	struct mlx5_rxq_data *rxq = dpdk_rxq;
 	const unsigned int wqe_cnt = (1 << rxq->elts_n) - 1;
 	const unsigned int cqe_cnt = (1 << rxq->cqe_n) - 1;
 	const unsigned int sges_n = rxq->sges_n;
@@ -2037,7 +2037,7 @@ priv_check_vec_tx_support(struct priv *priv)
 }

 int __attribute__((weak))
-rxq_check_vec_support(struct rxq *rxq)
+rxq_check_vec_support(struct mlx5_rxq_data *rxq)
 {
 	(void)rxq;
 	return -ENOTSUP;
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 37698ab..a86b6fb 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -90,7 +90,7 @@ struct rxq_zip {
 };

 /* RX queue descriptor. */
-struct rxq {
+struct mlx5_rxq_data {
 	unsigned int csum:1; /* Enable checksum offloading. */
 	unsigned int csum_l2tun:1; /* Same for L2 tunnels. */
 	unsigned int vlan_strip:1; /* Enable VLAN stripping. */
@@ -122,14 +122,14 @@ struct rxq {
 } __rte_cache_aligned;

 /* RX queue control descriptor. */
-struct rxq_ctrl {
+struct mlx5_rxq_ctrl {
 	struct priv *priv; /* Back pointer to private data. */
 	struct ibv_cq *cq; /* Completion Queue. */
 	struct ibv_wq *wq; /* Work Queue. */
 	struct ibv_mr *mr; /* Memory Region (for mp). */
 	struct ibv_comp_channel *channel;
 	unsigned int socket; /* CPU socket ID for allocations. */
-	struct rxq rxq; /* Data path structure. */
+	struct mlx5_rxq_data rxq; /* Data path structure. */
 };

 /* Hash RX queue types. */
@@ -294,7 +294,7 @@ int priv_create_hash_rxqs(struct priv *);
 void priv_destroy_hash_rxqs(struct priv *);
 int priv_allow_flow_type(struct priv *, enum hash_rxq_flow_type);
 int priv_rehash_flows(struct priv *);
-void rxq_cleanup(struct rxq_ctrl *);
+void mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *);
 int mlx5_rx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int,
 			const struct rte_eth_rxconf *, struct rte_mempool *);
 void mlx5_rx_queue_release(void *);
@@ -331,7 +331,7 @@ int mlx5_tx_descriptor_status(void *, uint16_t);
 /* Vectorized version of mlx5_rxtx.c */
 int priv_check_raw_vec_tx_support(struct priv *);
 int priv_check_vec_tx_support(struct priv *);
-int rxq_check_vec_support(struct rxq *);
+int rxq_check_vec_support(struct mlx5_rxq_data *);
 int priv_check_vec_rx_support(struct priv *);
 uint16_t mlx5_tx_burst_raw_vec(void *, struct rte_mbuf **, uint16_t);
 uint16_t mlx5_tx_burst_vec(void *, struct rte_mbuf **, uint16_t);
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_sse.c b/drivers/net/mlx5/mlx5_rxtx_vec_sse.c
index 075dce9..b0c87bf 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_sse.c
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_sse.c
@@ -518,7 +518,7 @@ mlx5_tx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
  *   Number of packets to be stored.
  */
 static inline void
-rxq_copy_mbuf_v(struct rxq *rxq, struct rte_mbuf **pkts, uint16_t n)
+rxq_copy_mbuf_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t n)
 {
 	const uint16_t q_mask = (1 << rxq->elts_n) - 1;
 	struct rte_mbuf **elts = &(*rxq->elts)[rxq->rq_pi & q_mask];
@@ -544,7 +544,7 @@ rxq_copy_mbuf_v(struct rxq *rxq, struct rte_mbuf **pkts, uint16_t n)
  *   Number of buffers to be replenished.
  */
 static inline void
-rxq_replenish_bulk_mbuf(struct rxq *rxq, uint16_t n)
+rxq_replenish_bulk_mbuf(struct mlx5_rxq_data *rxq, uint16_t n)
 {
 	const uint16_t q_n = 1 << rxq->elts_n;
 	const uint16_t q_mask = q_n - 1;
@@ -583,7 +583,7 @@ rxq_replenish_bulk_mbuf(struct rxq *rxq, uint16_t n)
  *   the title completion descriptor to be copied to the rest of mbufs.
  */
 static inline void
-rxq_cq_decompress_v(struct rxq *rxq,
+rxq_cq_decompress_v(struct mlx5_rxq_data *rxq,
 		    volatile struct mlx5_cqe *cq,
 		    struct rte_mbuf **elts)
 {
@@ -749,8 +749,8 @@ rxq_cq_decompress_v(struct rxq *rxq,
  *   Pointer to array of packets to be filled.
  */
 static inline void
-rxq_cq_to_ptype_oflags_v(struct rxq *rxq, __m128i cqes[4], __m128i op_err,
-			 struct rte_mbuf **pkts)
+rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq, __m128i cqes[4],
+			 __m128i op_err, struct rte_mbuf **pkts)
 {
 	__m128i pinfo0, pinfo1;
 	__m128i pinfo, ptype;
@@ -884,7 +884,7 @@ rxq_cq_to_ptype_oflags_v(struct rxq *rxq, __m128i cqes[4], __m128i op_err,
  *   Number of packets successfully received (<= pkts_n).
  */
 static uint16_t
-rxq_handle_pending_error(struct rxq *rxq, struct rte_mbuf **pkts,
+rxq_handle_pending_error(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts,
 			 uint16_t pkts_n)
 {
 	uint16_t n = 0;
@@ -931,7 +931,7 @@ rxq_handle_pending_error(struct rxq *rxq, struct rte_mbuf **pkts,
  *   Number of packets received including errors (<= pkts_n).
  */
 static inline uint16_t
-rxq_burst_v(struct rxq *rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
+rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 {
 	const uint16_t q_n = 1 << rxq->cqe_n;
 	const uint16_t q_mask = q_n - 1;
@@ -1279,7 +1279,7 @@ rxq_burst_v(struct rxq *rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 uint16_t
 mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 {
-	struct rxq *rxq = dpdk_rxq;
+	struct mlx5_rxq_data *rxq = dpdk_rxq;
 	uint16_t nb_rx;

 	nb_rx = rxq_burst_v(rxq, pkts, pkts_n);
@@ -1345,9 +1345,10 @@ priv_check_vec_tx_support(struct priv *priv)
  *   1 if supported, negative errno value if not.
  */
 int __attribute__((cold))
-rxq_check_vec_support(struct rxq *rxq)
+rxq_check_vec_support(struct mlx5_rxq_data *rxq)
 {
-	struct rxq_ctrl *ctrl = container_of(rxq, struct rxq_ctrl, rxq);
+	struct mlx5_rxq_ctrl *ctrl =
+		container_of(rxq, struct mlx5_rxq_ctrl, rxq);

 	if (!ctrl->priv->rx_vec_en || rxq->sges_n != 0)
 		return -ENOTSUP;
@@ -1372,7 +1373,7 @@ priv_check_vec_rx_support(struct priv *priv)
 		return -ENOTSUP;
 	/* All the configured queues should support. */
 	for (i = 0; i < priv->rxqs_n; ++i) {
-		struct rxq *rxq = (*priv->rxqs)[i];
+		struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];

 		if (rxq_check_vec_support(rxq) < 0)
 			break;
diff --git a/drivers/net/mlx5/mlx5_stats.c b/drivers/net/mlx5/mlx5_stats.c
index 06348c8..3de3af8 100644
--- a/drivers/net/mlx5/mlx5_stats.c
+++ b/drivers/net/mlx5/mlx5_stats.c
@@ -329,7 +329,7 @@ mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 	priv_lock(priv);
 	/* Add software counters. */
 	for (i = 0; (i != priv->rxqs_n); ++i) {
-		struct rxq *rxq = (*priv->rxqs)[i];
+		struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];

 		if (rxq == NULL)
 			continue;
diff --git a/drivers/net/mlx5/mlx5_vlan.c b/drivers/net/mlx5/mlx5_vlan.c
index 36ffbba..0d91591 100644
--- a/drivers/net/mlx5/mlx5_vlan.c
+++ b/drivers/net/mlx5/mlx5_vlan.c
@@ -137,8 +137,9 @@ mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 static void
 priv_vlan_strip_queue_set(struct priv *priv, uint16_t idx, int on)
 {
-	struct rxq *rxq = (*priv->rxqs)[idx];
-	struct rxq_ctrl *rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
+	struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
+	struct mlx5_rxq_ctrl *rxq_ctrl =
+		container_of(rxq, struct mlx5_rxq_ctrl, rxq);
 	struct ibv_wq_attr mod;
 	uint16_t vlan_offloads =
 		(on ? IBV_WQ_FLAGS_CVLAN_STRIPPING : 0) |
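
Not part of the patch: since the change is a mechanical rename, a quick
sanity check after applying it is to grep the driver for the old names
and confirm no stale references remain, e.g.:

	git grep -nw 'struct rxq' drivers/net/mlx5/
	git grep -nw 'rxq_cleanup' drivers/net/mlx5/

Both should come back empty once every use has been converted to the
mlx5_-prefixed equivalents.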