From patchwork Fri Jun 29 18:12:24 2018
X-Patchwork-Submitter: Rahul Lakkireddy
X-Patchwork-Id: 41996
X-Patchwork-Delegate: ferruh.yigit@amd.com
From: Rahul Lakkireddy
To: dev@dpdk.org
Cc: shaguna@chelsio.com, indranil@chelsio.com, nirranjan@chelsio.com
Date: Fri, 29 Jun 2018 23:42:24 +0530
Message-Id: <1f5c5539f1c9fbe3e7b8df4e7d0806a405b63911.1530295732.git.rahul.lakkireddy@chelsio.com>
X-Mailer: git-send-email 2.5.3
Subject: [dpdk-dev] [PATCH 9/9] net/cxgbe: add support to redirect packets to egress physical port

From: Shagun Agrawal

Add an action to redirect matched packets to the specified egress
physical port without sending them to the host.

Signed-off-by: Shagun Agrawal
Signed-off-by: Rahul Lakkireddy
---
Two illustrative usage sketches (not part of the patch) follow the diff below.

 drivers/net/cxgbe/base/t4_msg.h  |  6 ++++++
 drivers/net/cxgbe/cxgbe_filter.c | 19 +++++++++++++++++--
 drivers/net/cxgbe/cxgbe_filter.h |  5 ++++-
 drivers/net/cxgbe/cxgbe_flow.c   | 36 ++++++++++++++++++++++++++++++++++++
 4 files changed, 63 insertions(+), 3 deletions(-)

diff --git a/drivers/net/cxgbe/base/t4_msg.h b/drivers/net/cxgbe/base/t4_msg.h
index 7f4c98fb6..5d433c91c 100644
--- a/drivers/net/cxgbe/base/t4_msg.h
+++ b/drivers/net/cxgbe/base/t4_msg.h
@@ -113,6 +113,9 @@ struct work_request_hdr {
 #define G_COOKIE(x) (((x) >> S_COOKIE) & M_COOKIE)
 
 /* option 0 fields */
+#define S_TX_CHAN    2
+#define V_TX_CHAN(x) ((x) << S_TX_CHAN)
+
 #define S_DELACK    5
 #define V_DELACK(x) ((x) << S_DELACK)
 
@@ -145,6 +148,9 @@ struct work_request_hdr {
 #define V_RX_CHANNEL(x) ((x) << S_RX_CHANNEL)
 #define F_RX_CHANNEL    V_RX_CHANNEL(1U)
 
+#define S_CCTRL_ECN    27
+#define V_CCTRL_ECN(x) ((x) << S_CCTRL_ECN)
+
 #define S_T5_OPT_2_VALID    31
 #define V_T5_OPT_2_VALID(x) ((x) << S_T5_OPT_2_VALID)
 #define F_T5_OPT_2_VALID    V_T5_OPT_2_VALID(1U)
diff --git a/drivers/net/cxgbe/cxgbe_filter.c b/drivers/net/cxgbe/cxgbe_filter.c
index 8c5890ea8..7f0d38001 100644
--- a/drivers/net/cxgbe/cxgbe_filter.c
+++ b/drivers/net/cxgbe/cxgbe_filter.c
@@ -71,6 +71,15 @@ int validate_filter(struct adapter *adapter, struct ch_filter_specification *fs)
 #undef S
 #undef U
 
+	/*
+	 * If the user is requesting that the filter action loop
+	 * matching packets back out one of our ports, make sure that
+	 * the egress port is in range.
+	 */
+	if (fs->action == FILTER_SWITCH &&
+	    fs->eport >= adapter->params.nports)
+		return -ERANGE;
+
 	/*
	 * Don't allow various trivially obvious bogus out-of-range
	 * values ...
@@ -419,6 +428,7 @@ static void mk_act_open_req6(struct filter_entry *f, struct rte_mbuf *mbuf,
 	req->opt0 = cpu_to_be64(V_DELACK(f->fs.hitcnts) |
 				V_SMAC_SEL((cxgbe_port_viid(f->dev) & 0x7F)
					   << 1) |
+				V_TX_CHAN(f->fs.eport) |
 				V_ULP_MODE(ULP_MODE_NONE) |
 				F_TCAM_BYPASS | F_NON_OFFLOAD);
 	req->params = cpu_to_be64(V_FILTER_TUPLE(hash_filter_ntuple(f)));
@@ -427,7 +437,8 @@ static void mk_act_open_req6(struct filter_entry *f, struct rte_mbuf *mbuf,
 				 F_T5_OPT_2_VALID |
 				 F_RX_CHANNEL |
 				 V_CONG_CNTRL((f->fs.action == FILTER_DROP) |
-					      (f->fs.dirsteer << 1)));
+					      (f->fs.dirsteer << 1)) |
+				 V_CCTRL_ECN(f->fs.action == FILTER_SWITCH));
 }
 
 /**
@@ -460,6 +471,7 @@ static void mk_act_open_req(struct filter_entry *f, struct rte_mbuf *mbuf,
 	req->opt0 = cpu_to_be64(V_DELACK(f->fs.hitcnts) |
 				V_SMAC_SEL((cxgbe_port_viid(f->dev) & 0x7F)
					   << 1) |
+				V_TX_CHAN(f->fs.eport) |
 				V_ULP_MODE(ULP_MODE_NONE) |
 				F_TCAM_BYPASS | F_NON_OFFLOAD);
 	req->params = cpu_to_be64(V_FILTER_TUPLE(hash_filter_ntuple(f)));
@@ -468,7 +480,8 @@ static void mk_act_open_req(struct filter_entry *f, struct rte_mbuf *mbuf,
 				 F_T5_OPT_2_VALID |
 				 F_RX_CHANNEL |
 				 V_CONG_CNTRL((f->fs.action == FILTER_DROP) |
-					      (f->fs.dirsteer << 1)));
+					      (f->fs.dirsteer << 1)) |
+				 V_CCTRL_ECN(f->fs.action == FILTER_SWITCH));
 }
 
 /**
@@ -666,7 +679,9 @@ int set_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)
 	fwr->del_filter_to_l2tix =
 		cpu_to_be32(V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
			    V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
+			    V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
			    V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
+			    V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
			    V_FW_FILTER_WR_PRIO(f->fs.prio));
 	fwr->ethtype = cpu_to_be16(f->fs.val.ethtype);
 	fwr->ethtypem = cpu_to_be16(f->fs.mask.ethtype);
diff --git a/drivers/net/cxgbe/cxgbe_filter.h b/drivers/net/cxgbe/cxgbe_filter.h
index fac1f75f9..af8fa7529 100644
--- a/drivers/net/cxgbe/cxgbe_filter.h
+++ b/drivers/net/cxgbe/cxgbe_filter.h
@@ -98,6 +98,8 @@ struct ch_filter_specification {
 	uint32_t dirsteer:1;	/* 0 => RSS, 1 => steer to iq */
 	uint32_t iq:10;		/* ingress queue */
 
+	uint32_t eport:2;	/* egress port to switch packet out */
+
 	/* Filter rule value/mask pairs. */
 	struct ch_filter_tuple val;
 	struct ch_filter_tuple mask;
@@ -105,7 +107,8 @@
 
 enum {
 	FILTER_PASS = 0,	/* default */
-	FILTER_DROP
+	FILTER_DROP,
+	FILTER_SWITCH
 };
 
 enum filter_type {
diff --git a/drivers/net/cxgbe/cxgbe_flow.c b/drivers/net/cxgbe/cxgbe_flow.c
index 823bc720c..01c945f1b 100644
--- a/drivers/net/cxgbe/cxgbe_flow.c
+++ b/drivers/net/cxgbe/cxgbe_flow.c
@@ -326,6 +326,28 @@ static int cxgbe_get_fidx(struct rte_flow *flow, unsigned int *fidx)
 	return 0;
 }
 
+static int
+ch_rte_parse_atype_switch(const struct rte_flow_action *a,
+			  struct ch_filter_specification *fs,
+			  struct rte_flow_error *e)
+{
+	const struct rte_flow_action_phy_port *port;
+
+	switch (a->type) {
+	case RTE_FLOW_ACTION_TYPE_PHY_PORT:
+		port = (const struct rte_flow_action_phy_port *)a->conf;
+		fs->eport = port->index;
+		break;
+	default:
+		/* We are not supposed to come here */
+		return rte_flow_error_set(e, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION, a,
+					  "Action not supported");
+	}
+
+	return 0;
+}
+
 static int
 cxgbe_rtef_parse_actions(struct rte_flow *flow,
 			 const struct rte_flow_action action[],
@@ -335,6 +357,7 @@ cxgbe_rtef_parse_actions(struct rte_flow *flow,
 	const struct rte_flow_action_queue *q;
 	const struct rte_flow_action *a;
 	char abit = 0;
+	int ret;
 
 	for (a = action; a->type != RTE_FLOW_ACTION_TYPE_END; a++) {
 		switch (a->type) {
@@ -368,6 +391,19 @@ cxgbe_rtef_parse_actions(struct rte_flow *flow,
 		case RTE_FLOW_ACTION_TYPE_COUNT:
 			fs->hitcnts = 1;
 			break;
+		case RTE_FLOW_ACTION_TYPE_PHY_PORT:
+			/* We allow multiple switch actions, but switch is
+			 * not compatible with either queue or drop
+			 */
+			if (abit++ && fs->action != FILTER_SWITCH)
+				return rte_flow_error_set(e, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ACTION, a,
+						"overlapping action specified");
+			ret = ch_rte_parse_atype_switch(a, fs, e);
+			if (ret)
+				return ret;
+			fs->action = FILTER_SWITCH;
+			break;
 		default:
 			/* Not supported action : return error */
 			return rte_flow_error_set(e, ENOTSUP,
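
[Not part of the patch] As referenced above, here is a minimal sketch of how an
application could request this behaviour through the generic rte_flow API that
cxgbe_flow.c parses: the PHY_PORT action carries the egress port index that
ch_rte_parse_atype_switch() copies into fs->eport, after which the driver sets
fs->action = FILTER_SWITCH. The helper name install_redirect_rule(), the
"match any IPv4" pattern and the optional COUNT action are illustrative
assumptions, not something this patch mandates.

#include <rte_ethdev.h>
#include <rte_flow.h>

/*
 * Illustrative only: match every ingress IPv4 packet on DPDK port 'rx_port'
 * and switch it out of physical port 'phy_port_index' of the same NIC,
 * without the packet ever reaching the host.  The COUNT action keeps hit
 * statistics, which the driver maps to fs->hitcnts.
 */
struct rte_flow *
install_redirect_rule(uint16_t rx_port, uint32_t phy_port_index,
		      struct rte_flow_error *err)
{
	const struct rte_flow_attr attr = { .ingress = 1 };
	const struct rte_flow_action_phy_port phy_port = {
		.original = 0,
		.index = phy_port_index,	/* ends up in fs->eport */
	};
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },	/* NULL spec: any IPv4 */
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action actions[] = {
		{
			.type = RTE_FLOW_ACTION_TYPE_PHY_PORT,
			.conf = &phy_port,
		},
		{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	/* Validate first, then create the rule. */
	if (rte_flow_validate(rx_port, &attr, pattern, actions, err))
		return NULL;

	return rte_flow_create(rx_port, &attr, pattern, actions, err);
}

The same rule can also be tried interactively through testpmd's flow command,
which exposes a matching phy_port action.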
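
[Not part of the patch] A second, equally illustrative sketch of what the new
t4_msg.h option-bit macros amount to when a hash filter is built:
S_TX_CHAN/V_TX_CHAN place the egress port at bit 2 of option 0, and
S_CCTRL_ECN/V_CCTRL_ECN raise bit 27 of option 2 when the action is
FILTER_SWITCH, mirroring how mk_act_open_req() and mk_act_open_req6() compose
req->opt0 and req->opt2 above. The macros are re-stated only so the fragment
compiles on its own; the action and port values are made up.

#include <inttypes.h>
#include <stdio.h>

/* Re-stated from the t4_msg.h hunk above so this fragment stands alone. */
#define S_TX_CHAN    2
#define V_TX_CHAN(x) ((x) << S_TX_CHAN)

#define S_CCTRL_ECN    27
#define V_CCTRL_ECN(x) ((x) << S_CCTRL_ECN)

/* Mirrors the filter actions from cxgbe_filter.h. */
enum { FILTER_PASS = 0, FILTER_DROP, FILTER_SWITCH };

int main(void)
{
	unsigned int action = FILTER_SWITCH;	/* made-up example */
	unsigned int eport = 1;			/* fs->eport: egress port 1 */

	/* As in mk_act_open_req(): the egress channel goes into opt0 ... */
	uint64_t opt0 = V_TX_CHAN(eport);
	/* ... and the FILTER_SWITCH indication into opt2. */
	uint32_t opt2 = V_CCTRL_ECN(action == FILTER_SWITCH);

	printf("opt0 |= 0x%" PRIx64 ", opt2 |= 0x%" PRIx32 "\n", opt0, opt2);
	return 0;
}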