[1/2] net/sfc: offer indirect VXLAN encap action in transfer flows
Checks
Commit Message
Parsing inline action VXLAN_ENCAP repeating in many flows is
expensive, so offer support for its indirect version. Query
operation is not supported for this action. The next patch
will add a means to update the encapsulation header data.
Signed-off-by: Ivan Malov <ivan.malov@arknetworks.am>
Reviewed-by: Andy Moreton <andy.moreton@amd.com>
---
.mailmap | 2 +-
doc/guides/rel_notes/release_23_11.rst | 4 ++
drivers/net/sfc/sfc_flow.h | 1 +
drivers/net/sfc/sfc_mae.c | 51 ++++++++++++++++++++++++++
drivers/net/sfc/sfc_mae.h | 1 +
5 files changed, 58 insertions(+), 1 deletion(-)
Comments
On 8/10/2023 7:06 PM, Ivan Malov wrote:
> Parsing inline action VXLAN_ENCAP repeating in many flows is
> expensive, so offer support for its indirect version. Query
> operation is not supported for this action. The next patch
> will add a means to update the encapsulation header data.
>
> Signed-off-by: Ivan Malov <ivan.malov@arknetworks.am>
> Reviewed-by: Andy Moreton <andy.moreton@amd.com>
>
Hi Andrew,
Are you planning to review the set, or do you want me to proceed with
Andy's review? Please advise how to progress.
On 8/10/23 21:06, Ivan Malov wrote:
> Parsing inline action VXLAN_ENCAP repeating in many flows is
> expensive, so offer support for its indirect version. Query
> operation is not supported for this action. The next patch
> will add a means to update the encapsulation header data.
>
> Signed-off-by: Ivan Malov <ivan.malov@arknetworks.am>
> Reviewed-by: Andy Moreton <andy.moreton@amd.com>
Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
On 9/22/2023 7:32 AM, Andrew Rybchenko wrote:
> On 8/10/23 21:06, Ivan Malov wrote:
>> Parsing inline action VXLAN_ENCAP repeating in many flows is
>> expensive, so offer support for its indirect version. Query
>> operation is not supported for this action. The next patch
>> will add a means to update the encapsulation header data.
>>
>> Signed-off-by: Ivan Malov <ivan.malov@arknetworks.am>
>> Reviewed-by: Andy Moreton <andy.moreton@amd.com>
>
> Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
>
>
Series applied to dpdk-next-net/main, thanks.
@@ -106,7 +106,7 @@ Andriy Berestovskyy <aber@semihalf.com> <andriy.berestovskyy@caviumnetworks.com>
Andrzej Ostruszka <amo@semihalf.com> <aostruszka@marvell.com>
Andy Gospodarek <andrew.gospodarek@broadcom.com> <gospo@broadcom.com>
Andy Green <andy@warmcat.com>
-Andy Moreton <amoreton@xilinx.com> <amoreton@solarflare.com>
+Andy Moreton <andy.moreton@amd.com> <amoreton@xilinx.com> <amoreton@solarflare.com>
Andy Pei <andy.pei@intel.com>
Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>
Ankur Dwivedi <adwivedi@marvell.com> <ankur.dwivedi@caviumnetworks.com> <ankur.dwivedi@cavium.com>
@@ -55,6 +55,10 @@ New Features
Also, make sure to start the actual text at the margin.
=======================================================
+* **Updated Solarflare network PMD.**
+
+ * Added support for transfer flow action INDIRECT with subtype VXLAN_ENCAP.
+
Removed Items
-------------
@@ -98,6 +98,7 @@ struct rte_flow_action_handle {
enum rte_flow_action_type type;
union {
+ struct sfc_mae_encap_header *encap_header;
struct sfc_mae_counter *counter;
};
};
@@ -663,6 +663,9 @@ sfc_mae_encap_header_attach(struct sfc_adapter *sa,
SFC_ASSERT(sfc_adapter_is_locked(sa));
TAILQ_FOREACH(encap_header, &mae->encap_headers, entries) {
+ if (encap_header->indirect)
+ continue;
+
if (encap_header->size == bounce_eh->size &&
memcmp(encap_header->buf, bounce_eh->buf,
bounce_eh->size) == 0) {
@@ -4057,6 +4060,9 @@ sfc_mae_rule_parse_action_vxlan_encap(
/* Take care of the masks. */
sfc_mae_header_force_item_masks(buf, parsed_items, nb_parsed_items);
+ if (spec == NULL)
+ return 0;
+
rc = efx_mae_action_set_populate_encap(spec);
if (rc != 0) {
rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ACTION,
@@ -4160,6 +4166,23 @@ sfc_mae_rule_parse_action_indirect(struct sfc_adapter *sa,
sfc_dbg(sa, "attaching to indirect_action=%p", entry);
switch (entry->type) {
+ case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+ if (ctx->encap_header != NULL) {
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot have multiple actions VXLAN_ENCAP in one flow");
+ }
+
+ rc = efx_mae_action_set_populate_encap(ctx->spec);
+ if (rc != 0) {
+ return rte_flow_error_set(error, rc,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "failed to add ENCAP to MAE action set");
+ }
+
+ ctx->encap_header = entry->encap_header;
+ ++(ctx->encap_header->refcnt);
+ break;
case RTE_FLOW_ACTION_TYPE_COUNT:
if (ft_rule_type != SFC_FT_RULE_NONE) {
return rte_flow_error_set(error, EINVAL,
@@ -5182,12 +5205,31 @@ sfc_mae_indir_action_create(struct sfc_adapter *sa,
struct rte_flow_action_handle *handle,
struct rte_flow_error *error)
{
+ struct sfc_mae *mae = &sa->mae;
+ bool custom_error = false;
int ret;
SFC_ASSERT(sfc_adapter_is_locked(sa));
SFC_ASSERT(handle != NULL);
switch (action->type) {
+ case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+ /* Cleanup after previous encap. header bounce buffer usage. */
+ sfc_mae_bounce_eh_invalidate(&mae->bounce_eh);
+
+ ret = sfc_mae_rule_parse_action_vxlan_encap(mae, action->conf,
+ NULL, error);
+ if (ret != 0) {
+ custom_error = true;
+ break;
+ }
+
+ ret = sfc_mae_encap_header_add(sa, &mae->bounce_eh,
+ &handle->encap_header);
+ if (ret == 0)
+ handle->encap_header->indirect = true;
+ break;
+
case RTE_FLOW_ACTION_TYPE_COUNT:
ret = sfc_mae_rule_parse_action_count(sa, action->conf,
EFX_COUNTER_TYPE_ACTION,
@@ -5199,6 +5241,9 @@ sfc_mae_indir_action_create(struct sfc_adapter *sa,
ret = ENOTSUP;
}
+ if (custom_error)
+ return ret;
+
if (ret != 0) {
return rte_flow_error_set(error, ret,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
@@ -5219,6 +5264,12 @@ sfc_mae_indir_action_destroy(struct sfc_adapter *sa,
SFC_ASSERT(handle != NULL);
switch (handle->type) {
+ case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+ if (handle->encap_header->refcnt != 1)
+ goto fail;
+
+ sfc_mae_encap_header_del(sa, handle->encap_header);
+ break;
case RTE_FLOW_ACTION_TYPE_COUNT:
if (handle->counter->refcnt != 1)
goto fail;
@@ -60,6 +60,7 @@ TAILQ_HEAD(sfc_mae_mac_addrs, sfc_mae_mac_addr);
struct sfc_mae_encap_header {
TAILQ_ENTRY(sfc_mae_encap_header) entries;
unsigned int refcnt;
+ bool indirect;
uint8_t *buf;
size_t size;
efx_tunnel_protocol_t type;