[v5,31/80] net/ntnic: add hash API

Message ID: 20241030213940.3470062-32-sil-plv@napatech.com (mailing list archive)
State: Accepted, archived
Delegated to: Ferruh Yigit
Series: Provide flow filter and statistics support

Checks

Context        Check    Description
ci/checkpatch  warning  coding style issues

Commit Message

Serhii Iliushyk Oct. 30, 2024, 9:38 p.m. UTC
From: Danylo Vodopianov <dvo-plv@napatech.com>

The Hasher module calculates a configurable hash value
to be used internally by the FPGA.
The module supports both Toeplitz and NT-hash.

Signed-off-by: Danylo Vodopianov <dvo-plv@napatech.com>
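
For context (not part of the patch itself), a minimal application-side sketch of how the hash
recipes added here are typically exercised through the generic rte_flow RSS action. The port id,
queue list and example Toeplitz key are illustrative assumptions; RTE_ETH_HASH_FUNCTION_DEFAULT
would select the NT-hash path, RTE_ETH_HASH_FUNCTION_TOEPLITZ the Toeplitz path.

#include <rte_common.h>
#include <rte_ethdev.h>
#include <rte_flow.h>

static struct rte_flow *
example_rss_flow(uint16_t port_id, struct rte_flow_error *error)
{
	/* commonly used 40-byte Toeplitz key, placeholder only */
	static const uint8_t rss_key[40] = {
		0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
		0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
		0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
		0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
		0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
	};
	static const uint16_t queues[] = { 0, 1 };
	const struct rte_flow_attr attr = { .ingress = 1 };
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action_rss rss = {
		.func = RTE_ETH_HASH_FUNCTION_TOEPLITZ, /* or _DEFAULT for NT-hash */
		.types = RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV4_UDP,
		.key_len = sizeof(rss_key),
		.key = rss_key,
		.queue_num = RTE_DIM(queues),
		.queue = queues,
	};
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	/* distribute matching traffic across the listed queues */
	return rte_flow_create(port_id, &attr, pattern, actions, error);
}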
---
 doc/guides/nics/ntnic.rst                     |   2 +
 drivers/net/ntnic/include/flow_api.h          |  40 +
 drivers/net/ntnic/include/flow_api_engine.h   |  17 +
 drivers/net/ntnic/include/hw_mod_backend.h    |  20 +
 .../ntnic/include/stream_binary_flow_api.h    |  25 +
 drivers/net/ntnic/meson.build                 |   1 +
 drivers/net/ntnic/nthw/flow_api/flow_api.c    | 212 +++++
 drivers/net/ntnic/nthw/flow_api/flow_hasher.c | 156 ++++
 drivers/net/ntnic/nthw/flow_api/flow_hasher.h |  21 +
 drivers/net/ntnic/nthw/flow_api/flow_km.c     |  25 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c   | 179 ++++
 .../profile_inline/flow_api_hw_db_inline.c    | 142 +++
 .../profile_inline/flow_api_hw_db_inline.h    |  11 +
 .../profile_inline/flow_api_profile_inline.c  | 846 +++++++++++++++++-
 .../profile_inline/flow_api_profile_inline.h  |   4 +
 drivers/net/ntnic/ntnic_mod_reg.h             |   4 +
 16 files changed, 1704 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_hasher.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_hasher.h
  

Patch

diff --git a/doc/guides/nics/ntnic.rst b/doc/guides/nics/ntnic.rst
index ed306e05b5..f2cb7a362a 100644
--- a/doc/guides/nics/ntnic.rst
+++ b/doc/guides/nics/ntnic.rst
@@ -54,6 +54,8 @@  Features
 - TX VLAN insertion via raw encap.
 - CAM and TCAM based matching.
 - Exact match of 140 million flows and policies.
+- Tunnel HW offload: Packet type, inner/outer RSS, IP and UDP checksum
+   verification.
 
 Limitations
 ~~~~~~~~~~~
diff --git a/drivers/net/ntnic/include/flow_api.h b/drivers/net/ntnic/include/flow_api.h
index edffd0a57a..2e96fa5bed 100644
--- a/drivers/net/ntnic/include/flow_api.h
+++ b/drivers/net/ntnic/include/flow_api.h
@@ -29,6 +29,37 @@  struct hw_mod_resource_s {
  */
 int flow_delete_eth_dev(struct flow_eth_dev *eth_dev);
 
+/**
+ * A structure used to configure the Receive Side Scaling (RSS) feature
+ * of an Ethernet port.
+ */
+struct nt_eth_rss_conf {
+	/**
+	 * In rte_eth_dev_rss_hash_conf_get(), the *rss_key_len* should be
+	 * greater than or equal to the *hash_key_size* which get from
+	 * rte_eth_dev_info_get() API. And the *rss_key* should contain at least
+	 * *hash_key_size* bytes. If not meet these requirements, the query
+	 * result is unreliable even if the operation returns success.
+	 *
+	 * In rte_eth_dev_rss_hash_update() or rte_eth_dev_configure(), if
+	 * *rss_key* is not NULL, the *rss_key_len* indicates the length of the
+	 * *rss_key* in bytes and it should be equal to *hash_key_size*.
+	 * If *rss_key* is NULL, drivers are free to use a random or a default key.
+	 */
+	uint8_t rss_key[MAX_RSS_KEY_LEN];
+	/**
+	 * Indicates the type of packets or the specific part of packets to
+	 * which RSS hashing is to be applied.
+	 */
+	uint64_t rss_hf;
+	/**
+	 * Hash algorithm.
+	 */
+	enum rte_eth_hash_function algorithm;
+};
+
+int sprint_nt_rss_mask(char *str, uint16_t str_len, const char *prefix, uint64_t hash_mask);
+
 struct flow_eth_dev {
 	/* NIC that owns this port device */
 	struct flow_nic_dev *ndev;
@@ -49,6 +80,11 @@  struct flow_eth_dev {
 	struct flow_eth_dev *next;
 };
 
+enum flow_nic_hash_e {
+	HASH_ALGO_ROUND_ROBIN = 0,
+	HASH_ALGO_5TUPLE,
+};
+
 /* registered NIC backends */
 struct flow_nic_dev {
 	uint8_t adapter_no;     /* physical adapter no in the host system */
@@ -191,4 +227,8 @@  void flow_nic_free_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
 int flow_nic_ref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type, int index);
 int flow_nic_deref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type, int index);
 
+int flow_nic_set_hasher(struct flow_nic_dev *ndev, int hsh_idx, enum flow_nic_hash_e algorithm);
+int flow_nic_set_hasher_fields(struct flow_nic_dev *ndev, int hsh_idx,
+	struct nt_eth_rss_conf rss_conf);
+
 #endif
diff --git a/drivers/net/ntnic/include/flow_api_engine.h b/drivers/net/ntnic/include/flow_api_engine.h
index a0f02f4e8a..e52363f04e 100644
--- a/drivers/net/ntnic/include/flow_api_engine.h
+++ b/drivers/net/ntnic/include/flow_api_engine.h
@@ -129,6 +129,7 @@  struct km_flow_def_s {
 	int bank_used;
 	uint32_t *cuckoo_moves;	/* for CAM statistics only */
 	struct cam_distrib_s *cam_dist;
+	struct hasher_s *hsh;
 
 	/* TCAM specific bank management */
 	struct tcam_distrib_s *tcam_dist;
@@ -136,6 +137,17 @@  struct km_flow_def_s {
 	int tcam_record;
 };
 
+/*
+ * RSS configuration, see struct rte_flow_action_rss
+ */
+struct hsh_def_s {
+	enum rte_eth_hash_function func;	/* RSS hash function to apply */
+	/* RSS hash types, see definition of RTE_ETH_RSS_* for hash calculation options */
+	uint64_t types;
+	uint32_t key_len;	/* Hash key length in bytes. */
+	const uint8_t *key;	/* Hash key. */
+};
+
 /*
  * Tunnel encapsulation header definition
  */
@@ -247,6 +259,11 @@  struct nic_flow_def {
 	 * Key Matcher flow definitions
 	 */
 	struct km_flow_def_s km;
+
+	/*
+	 * Hash module RSS definitions
+	 */
+	struct hsh_def_s hsh;
 };
 
 enum flow_handle_type {
diff --git a/drivers/net/ntnic/include/hw_mod_backend.h b/drivers/net/ntnic/include/hw_mod_backend.h
index 26903f2183..cee148807a 100644
--- a/drivers/net/ntnic/include/hw_mod_backend.h
+++ b/drivers/net/ntnic/include/hw_mod_backend.h
@@ -149,14 +149,27 @@  enum km_flm_if_select_e {
 	int debug
 
 enum frame_offs_e {
+	DYN_SOF = 0,
 	DYN_L2 = 1,
 	DYN_FIRST_VLAN = 2,
+	DYN_MPLS = 3,
 	DYN_L3 = 4,
+	DYN_ID_IPV4_6 = 5,
+	DYN_FINAL_IP_DST = 6,
 	DYN_L4 = 7,
 	DYN_L4_PAYLOAD = 8,
+	DYN_TUN_PAYLOAD = 9,
+	DYN_TUN_L2 = 10,
+	DYN_TUN_VLAN = 11,
+	DYN_TUN_MPLS = 12,
 	DYN_TUN_L3 = 13,
+	DYN_TUN_ID_IPV4_6 = 14,
+	DYN_TUN_FINAL_IP_DST = 15,
 	DYN_TUN_L4 = 16,
 	DYN_TUN_L4_PAYLOAD = 17,
+	DYN_EOF = 18,
+	DYN_L3_PAYLOAD_END = 19,
+	DYN_TUN_L3_PAYLOAD_END = 20,
 	SB_VNI = SWX_INFO | 1,
 	SB_MAC_PORT = SWX_INFO | 2,
 	SB_KCC_ID = SWX_INFO | 3
@@ -227,6 +240,11 @@  enum {
 };
 
 
+enum {
+	HASH_HASH_NONE = 0,
+	HASH_5TUPLE = 8,
+};
+
 enum {
 	CPY_SELECT_DSCP_IPV4 = 0,
 	CPY_SELECT_DSCP_IPV6 = 1,
@@ -670,6 +688,8 @@  int hw_mod_hsh_alloc(struct flow_api_backend_s *be);
 void hw_mod_hsh_free(struct flow_api_backend_s *be);
 int hw_mod_hsh_reset(struct flow_api_backend_s *be);
 int hw_mod_hsh_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count);
+int hw_mod_hsh_rcp_set(struct flow_api_backend_s *be, enum hw_hsh_e field, uint32_t index,
+	uint32_t word_off, uint32_t value);
 
 struct qsl_func_s {
 	COMMON_FUNC_INFO_S;
diff --git a/drivers/net/ntnic/include/stream_binary_flow_api.h b/drivers/net/ntnic/include/stream_binary_flow_api.h
index 8097518d61..e5fe686d99 100644
--- a/drivers/net/ntnic/include/stream_binary_flow_api.h
+++ b/drivers/net/ntnic/include/stream_binary_flow_api.h
@@ -12,6 +12,31 @@ 
 /* Max RSS hash key length in bytes */
 #define MAX_RSS_KEY_LEN 40
 
+/* NT specific MASKs for RSS configuration */
+/* NOTE: Masks are required for correct RSS configuration, do not modify them! */
+#define NT_ETH_RSS_IPV4_MASK                                                                      \
+	(RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_OTHER |              \
+	 RTE_ETH_RSS_NONFRAG_IPV4_SCTP | RTE_ETH_RSS_NONFRAG_IPV4_TCP |                           \
+	 RTE_ETH_RSS_NONFRAG_IPV4_UDP)
+
+#define NT_ETH_RSS_IPV6_MASK                                                                      \
+	(RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_IPV6_EX |                         \
+	 RTE_ETH_RSS_IPV6_TCP_EX | RTE_ETH_RSS_IPV6_UDP_EX | RTE_ETH_RSS_NONFRAG_IPV6_OTHER |     \
+	 RTE_ETH_RSS_NONFRAG_IPV6_SCTP | RTE_ETH_RSS_NONFRAG_IPV6_TCP |                           \
+	 RTE_ETH_RSS_NONFRAG_IPV6_UDP)
+
+#define NT_ETH_RSS_IP_MASK                                                                        \
+	(NT_ETH_RSS_IPV4_MASK | NT_ETH_RSS_IPV6_MASK | RTE_ETH_RSS_L3_SRC_ONLY |                  \
+	 RTE_ETH_RSS_L3_DST_ONLY)
+
+/* List of all RSS flags supported for RSS calculation offload */
+#define NT_ETH_RSS_OFFLOAD_MASK                                                                   \
+	(RTE_ETH_RSS_ETH | RTE_ETH_RSS_L2_PAYLOAD | RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP |            \
+	 RTE_ETH_RSS_UDP | RTE_ETH_RSS_SCTP | RTE_ETH_RSS_L2_SRC_ONLY | RTE_ETH_RSS_L2_DST_ONLY | \
+	 RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY | RTE_ETH_RSS_L3_SRC_ONLY |            \
+	 RTE_ETH_RSS_L3_DST_ONLY | RTE_ETH_RSS_VLAN | RTE_ETH_RSS_LEVEL_MASK |                    \
+	 RTE_ETH_RSS_IPV4_CHKSUM | RTE_ETH_RSS_L4_CHKSUM | RTE_ETH_RSS_PORT | RTE_ETH_RSS_GTPU)
+
 /*
  * Flow frontend for binary programming interface
  */
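
As an aside (not from the patch), a hedged sketch of how a capability check against
NT_ETH_RSS_OFFLOAD_MASK could look at the API boundary; the helper name is hypothetical.

/* Illustrative helper: reject any RTE_ETH_RSS_* bit outside the supported set. */
static inline int example_validate_rss_hf(uint64_t rss_hf)
{
	return (rss_hf & ~(uint64_t)NT_ETH_RSS_OFFLOAD_MASK) ? -1 : 0;
}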
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index e1fef37ccb..d7e6d05556 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -56,6 +56,7 @@  sources = files(
         'nthw/flow_api/profile_inline/flow_api_hw_db_inline.c',
         'nthw/flow_api/flow_backend/flow_backend.c',
         'nthw/flow_api/flow_filter.c',
+        'nthw/flow_api/flow_hasher.c',
         'nthw/flow_api/flow_kcc.c',
         'nthw/flow_api/flow_km.c',
         'nthw/flow_api/hw_mod/hw_mod_backend.c',
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api.c b/drivers/net/ntnic/nthw/flow_api/flow_api.c
index 22d7905c62..577b1c83b5 100644
--- a/drivers/net/ntnic/nthw/flow_api/flow_api.c
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api.c
@@ -2,6 +2,8 @@ 
  * SPDX-License-Identifier: BSD-3-Clause
  * Copyright(c) 2023 Napatech A/S
  */
+#include "ntlog.h"
+#include "nt_util.h"
 
 #include "flow_api_engine.h"
 #include "flow_api_nic_setup.h"
@@ -10,6 +12,11 @@ 
 #include "flow_api.h"
 #include "flow_filter.h"
 
+#define RSS_TO_STRING(name) \
+	{                \
+		name, #name   \
+	}
+
 const char *dbg_res_descr[] = {
 	/* RES_QUEUE */ "RES_QUEUE",
 	/* RES_CAT_CFN */ "RES_CAT_CFN",
@@ -773,6 +780,211 @@  void *flow_api_get_be_dev(struct flow_nic_dev *ndev)
 	return ndev->be.be_dev;
 }
 
+/* Information for a given RSS type. */
+struct rss_type_info {
+	uint64_t rss_type;
+	const char *str;
+};
+
+static struct rss_type_info rss_to_string[] = {
+	/* RTE_BIT64(2)   IPv4 dst + IPv4 src */
+	RSS_TO_STRING(RTE_ETH_RSS_IPV4),
+	/* RTE_BIT64(3)   IPv4 dst + IPv4 src + Identification of group of fragments  */
+	RSS_TO_STRING(RTE_ETH_RSS_FRAG_IPV4),
+	/* RTE_BIT64(4)   IPv4 dst + IPv4 src + L4 protocol */
+	RSS_TO_STRING(RTE_ETH_RSS_NONFRAG_IPV4_TCP),
+	/* RTE_BIT64(5)   IPv4 dst + IPv4 src + L4 protocol */
+	RSS_TO_STRING(RTE_ETH_RSS_NONFRAG_IPV4_UDP),
+	/* RTE_BIT64(6)   IPv4 dst + IPv4 src + L4 protocol */
+	RSS_TO_STRING(RTE_ETH_RSS_NONFRAG_IPV4_SCTP),
+	/* RTE_BIT64(7)   IPv4 dst + IPv4 src + L4 protocol */
+	RSS_TO_STRING(RTE_ETH_RSS_NONFRAG_IPV4_OTHER),
+	/*
+	 * RTE_BIT64(14)  128-bits of L2 payload starting after src MAC, i.e. including optional
+	 * VLAN tag and ethertype. Overrides all L3 and L4 flags at the same level, but inner
+	 * L2 payload can be combined with outer S-VLAN and GTPU TEID flags.
+	 */
+	RSS_TO_STRING(RTE_ETH_RSS_L2_PAYLOAD),
+	/* RTE_BIT64(18)  L4 dst + L4 src + L4 protocol - see comment of RTE_ETH_RSS_L4_CHKSUM */
+	RSS_TO_STRING(RTE_ETH_RSS_PORT),
+	/* RTE_BIT64(19)  Not supported */
+	RSS_TO_STRING(RTE_ETH_RSS_VXLAN),
+	/* RTE_BIT64(20)  Not supported */
+	RSS_TO_STRING(RTE_ETH_RSS_GENEVE),
+	/* RTE_BIT64(21)  Not supported */
+	RSS_TO_STRING(RTE_ETH_RSS_NVGRE),
+	/* RTE_BIT64(23)  GTP TEID - always from outer GTPU header */
+	RSS_TO_STRING(RTE_ETH_RSS_GTPU),
+	/* RTE_BIT64(24)  MAC dst + MAC src */
+	RSS_TO_STRING(RTE_ETH_RSS_ETH),
+	/* RTE_BIT64(25)  outermost VLAN ID + L4 protocol */
+	RSS_TO_STRING(RTE_ETH_RSS_S_VLAN),
+	/* RTE_BIT64(26)  innermost VLAN ID + L4 protocol */
+	RSS_TO_STRING(RTE_ETH_RSS_C_VLAN),
+	/* RTE_BIT64(27)  Not supported */
+	RSS_TO_STRING(RTE_ETH_RSS_ESP),
+	/* RTE_BIT64(28)  Not supported */
+	RSS_TO_STRING(RTE_ETH_RSS_AH),
+	/* RTE_BIT64(29)  Not supported */
+	RSS_TO_STRING(RTE_ETH_RSS_L2TPV3),
+	/* RTE_BIT64(30)  Not supported */
+	RSS_TO_STRING(RTE_ETH_RSS_PFCP),
+	/* RTE_BIT64(31)  Not supported */
+	RSS_TO_STRING(RTE_ETH_RSS_PPPOE),
+	/* RTE_BIT64(32)  Not supported */
+	RSS_TO_STRING(RTE_ETH_RSS_ECPRI),
+	/* RTE_BIT64(33)  Not supported */
+	RSS_TO_STRING(RTE_ETH_RSS_MPLS),
+	/* RTE_BIT64(34)  IPv4 Header checksum + L4 protocol */
+	RSS_TO_STRING(RTE_ETH_RSS_IPV4_CHKSUM),
+
+	/*
+	 * if combined with RTE_ETH_RSS_NONFRAG_IPV4_[TCP|UDP|SCTP] then
+	 *   L4 protocol + chosen protocol header Checksum
+	 * else
+	 *   error
+	 */
+	/* RTE_BIT64(35) */
+	RSS_TO_STRING(RTE_ETH_RSS_L4_CHKSUM),
+#ifndef ANDROMEDA_DPDK_21_11
+	/* RTE_BIT64(36)  Not supported */
+	RSS_TO_STRING(RTE_ETH_RSS_L2TPV2),
+#endif
+
+	{ RTE_BIT64(37), "unknown_RTE_BIT64(37)" },
+	{ RTE_BIT64(38), "unknown_RTE_BIT64(38)" },
+	{ RTE_BIT64(39), "unknown_RTE_BIT64(39)" },
+	{ RTE_BIT64(40), "unknown_RTE_BIT64(40)" },
+	{ RTE_BIT64(41), "unknown_RTE_BIT64(41)" },
+	{ RTE_BIT64(42), "unknown_RTE_BIT64(42)" },
+	{ RTE_BIT64(43), "unknown_RTE_BIT64(43)" },
+	{ RTE_BIT64(44), "unknown_RTE_BIT64(44)" },
+	{ RTE_BIT64(45), "unknown_RTE_BIT64(45)" },
+	{ RTE_BIT64(46), "unknown_RTE_BIT64(46)" },
+	{ RTE_BIT64(47), "unknown_RTE_BIT64(47)" },
+	{ RTE_BIT64(48), "unknown_RTE_BIT64(48)" },
+	{ RTE_BIT64(49), "unknown_RTE_BIT64(49)" },
+
+	/* RTE_BIT64(50)  outermost encapsulation */
+	RSS_TO_STRING(RTE_ETH_RSS_LEVEL_OUTERMOST),
+	/* RTE_BIT64(51)  innermost encapsulation */
+	RSS_TO_STRING(RTE_ETH_RSS_LEVEL_INNERMOST),
+
+	/* RTE_BIT64(52)  Not supported */
+	RSS_TO_STRING(RTE_ETH_RSS_L3_PRE96),
+	/* RTE_BIT64(53)  Not supported */
+	RSS_TO_STRING(RTE_ETH_RSS_L3_PRE64),
+	/* RTE_BIT64(54)  Not supported */
+	RSS_TO_STRING(RTE_ETH_RSS_L3_PRE56),
+	/* RTE_BIT64(55)  Not supported */
+	RSS_TO_STRING(RTE_ETH_RSS_L3_PRE48),
+	/* RTE_BIT64(56)  Not supported */
+	RSS_TO_STRING(RTE_ETH_RSS_L3_PRE40),
+	/* RTE_BIT64(57)  Not supported */
+	RSS_TO_STRING(RTE_ETH_RSS_L3_PRE32),
+
+	/* RTE_BIT64(58) */
+	RSS_TO_STRING(RTE_ETH_RSS_L2_DST_ONLY),
+	/* RTE_BIT64(59) */
+	RSS_TO_STRING(RTE_ETH_RSS_L2_SRC_ONLY),
+	/* RTE_BIT64(60) */
+	RSS_TO_STRING(RTE_ETH_RSS_L4_DST_ONLY),
+	/* RTE_BIT64(61) */
+	RSS_TO_STRING(RTE_ETH_RSS_L4_SRC_ONLY),
+	/* RTE_BIT64(62) */
+	RSS_TO_STRING(RTE_ETH_RSS_L3_DST_ONLY),
+	/* RTE_BIT64(63) */
+	RSS_TO_STRING(RTE_ETH_RSS_L3_SRC_ONLY),
+};
+
+int sprint_nt_rss_mask(char *str, uint16_t str_len, const char *prefix, uint64_t hash_mask)
+{
+	if (str == NULL || str_len == 0)
+		return -1;
+
+	memset(str, 0x0, str_len);
+	uint16_t str_end = 0;
+	const struct rss_type_info *start = rss_to_string;
+
+	for (const struct rss_type_info *p = start; p != start + ARRAY_SIZE(rss_to_string); ++p) {
+		if (p->rss_type & hash_mask) {
+			if (strlen(prefix) + strlen(p->str) < (size_t)(str_len - str_end)) {
+				snprintf(str + str_end, str_len - str_end, "%s", prefix);
+				str_end += strlen(prefix);
+				snprintf(str + str_end, str_len - str_end, "%s", p->str);
+				str_end += strlen(p->str);
+
+			} else {
+				return -1;
+			}
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Hash
+ */
+
+int flow_nic_set_hasher(struct flow_nic_dev *ndev, int hsh_idx, enum flow_nic_hash_e algorithm)
+{
+	hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_PRESET_ALL, hsh_idx, 0, 0);
+
+	switch (algorithm) {
+	case HASH_ALGO_5TUPLE:
+		/* need to create an IPv6 hashing and enable the adaptive ip mask bit */
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_LOAD_DIST_TYPE, hsh_idx, 0, 2);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_PE, hsh_idx, 0, DYN_FINAL_IP_DST);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_OFS, hsh_idx, 0, -16);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_PE, hsh_idx, 0, DYN_FINAL_IP_DST);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_OFS, hsh_idx, 0, 0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_PE, hsh_idx, 0, DYN_L4);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_OFS, hsh_idx, 0, 0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W9_PE, hsh_idx, 0, 0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W9_OFS, hsh_idx, 0, 0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W9_P, hsh_idx, 0, 0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_P_MASK, hsh_idx, 0, 1);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 0, 0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 1, 0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 2, 0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 3, 0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 4, 0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 5, 0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 6, 0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 7, 0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 8, 0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 9, 0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_SEED, hsh_idx, 0, 0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_VALID, hsh_idx, 0, 1);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE, hsh_idx, 0, HASH_5TUPLE);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_AUTO_IPV4_MASK, hsh_idx, 0, 1);
+
+		NT_LOG(DBG, FILTER, "Set IPv6 5-tuple hasher with adaptive IPv4 hashing");
+		break;
+
+	default:
+	case HASH_ALGO_ROUND_ROBIN:
+		/* zero is round-robin */
+		break;
+	}
+
+	return 0;
+}
+
+int flow_nic_set_hasher_fields(struct flow_nic_dev *ndev, int hsh_idx,
+	struct nt_eth_rss_conf rss_conf)
+{
+	const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();
+
+	if (profile_inline_ops == NULL) {
+		NT_LOG(ERR, FILTER, "%s: profile_inline module uninitialized", __func__);
+		return -1;
+	}
+
+	return profile_inline_ops->flow_nic_set_hasher_fields_inline(ndev, hsh_idx, rss_conf);
+}
+
 static const struct flow_filter_ops ops = {
 	.flow_filter_init = flow_filter_init,
 	.flow_filter_done = flow_filter_done,
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_hasher.c b/drivers/net/ntnic/nthw/flow_api/flow_hasher.c
new file mode 100644
index 0000000000..86dfc16e79
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_hasher.c
@@ -0,0 +1,156 @@ 
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <math.h>
+
+#include "flow_hasher.h"
+
+static uint32_t shuffle(uint32_t x)
+{
+	return ((x & 0x00000002) << 29) | ((x & 0xAAAAAAA8) >> 3) | ((x & 0x15555555) << 3) |
+		((x & 0x40000000) >> 29);
+}
+
+static uint32_t ror_inv(uint32_t x, const int s)
+{
+	return (x >> s) | ((~x) << (32 - s));
+}
+
+static uint32_t combine(uint32_t x, uint32_t y)
+{
+	uint32_t x1 = ror_inv(x, 15);
+	uint32_t x2 = ror_inv(x, 13);
+	uint32_t y1 = ror_inv(y, 3);
+	uint32_t y2 = ror_inv(y, 27);
+
+	return x ^ y ^
+		((x1 & y1 & ~x2 & ~y2) | (x1 & ~y1 & x2 & ~y2) | (x1 & ~y1 & ~x2 & y2) |
+		(~x1 & y1 & x2 & ~y2) | (~x1 & y1 & ~x2 & y2) | (~x1 & ~y1 & x2 & y2));
+}
+
+static uint32_t mix(uint32_t x, uint32_t y)
+{
+	return shuffle(combine(x, y));
+}
+
+static uint64_t ror_inv3(uint64_t x)
+{
+	const uint64_t m = 0xE0000000E0000000ULL;
+
+	return ((x >> 3) | m) ^ ((x << 29) & m);
+}
+
+static uint64_t ror_inv13(uint64_t x)
+{
+	const uint64_t m = 0xFFF80000FFF80000ULL;
+
+	return ((x >> 13) | m) ^ ((x << 19) & m);
+}
+
+static uint64_t ror_inv15(uint64_t x)
+{
+	const uint64_t m = 0xFFFE0000FFFE0000ULL;
+
+	return ((x >> 15) | m) ^ ((x << 17) & m);
+}
+
+static uint64_t ror_inv27(uint64_t x)
+{
+	const uint64_t m = 0xFFFFFFE0FFFFFFE0ULL;
+
+	return ((x >> 27) | m) ^ ((x << 5) & m);
+}
+
+static uint64_t shuffle64(uint64_t x)
+{
+	return ((x & 0x0000000200000002) << 29) | ((x & 0xAAAAAAA8AAAAAAA8) >> 3) |
+		((x & 0x1555555515555555) << 3) | ((x & 0x4000000040000000) >> 29);
+}
+
+static uint64_t pair(uint32_t x, uint32_t y)
+{
+	return ((uint64_t)x << 32) | y;
+}
+
+static uint64_t combine64(uint64_t x, uint64_t y)
+{
+	uint64_t x1 = ror_inv15(x);
+	uint64_t x2 = ror_inv13(x);
+	uint64_t y1 = ror_inv3(y);
+	uint64_t y2 = ror_inv27(y);
+
+	return x ^ y ^
+		((x1 & y1 & ~x2 & ~y2) | (x1 & ~y1 & x2 & ~y2) | (x1 & ~y1 & ~x2 & y2) |
+		(~x1 & y1 & x2 & ~y2) | (~x1 & y1 & ~x2 & y2) | (~x1 & ~y1 & x2 & y2));
+}
+
+static uint64_t mix64(uint64_t x, uint64_t y)
+{
+	return shuffle64(combine64(x, y));
+}
+
+static uint32_t calc16(const uint32_t key[16])
+{
+	/*
+	 * 0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15   Layer 0
+	 *  \./     \./     \./     \./     \./     \./     \./     \./
+	 *   0       1       2       3       4       5       6       7     Layer 1
+	 *    \__.__/         \__.__/         \__.__/         \__.__/
+	 *       0               1               2               3         Layer 2
+	 *        \______.______/                 \______.______/
+	 *               0                               1                 Layer 3
+	 *                \______________.______________/
+	 *                               0                                 Layer 4
+	 *                              / \
+	 *                              \./
+	 *                               0                                 Layer 5
+	 *                              / \
+	 *                              \./                                Layer 6
+	 *                             value
+	 */
+
+	uint64_t z;
+	uint32_t x;
+
+	z = mix64(mix64(mix64(pair(key[0], key[8]), pair(key[1], key[9])),
+		mix64(pair(key[2], key[10]), pair(key[3], key[11]))),
+		mix64(mix64(pair(key[4], key[12]), pair(key[5], key[13])),
+		mix64(pair(key[6], key[14]), pair(key[7], key[15]))));
+
+	x = mix((uint32_t)(z >> 32), (uint32_t)z);
+	x = mix(x, ror_inv(x, 17));
+	x = combine(x, ror_inv(x, 17));
+
+	return x;
+}
+
+uint32_t gethash(struct hasher_s *hsh, const uint32_t key[16], int *result)
+{
+	uint64_t val;
+	uint32_t res;
+
+	val = calc16(key);
+	res = (uint32_t)val;
+
+	if (hsh->cam_bw > 32)
+		val = (val << (hsh->cam_bw - 32)) ^ val;
+
+	for (int i = 0; i < hsh->banks; i++) {
+		result[i] = (unsigned int)(val & hsh->cam_records_bw_mask);
+		val = val >> hsh->cam_records_bw;
+	}
+
+	return res;
+}
+
+int init_hasher(struct hasher_s *hsh, int banks, int nb_records)
+{
+	hsh->banks = banks;
+	hsh->cam_records_bw = (int)(log2(nb_records - 1) + 1);
+	hsh->cam_records_bw_mask = (1U << hsh->cam_records_bw) - 1;
+	hsh->cam_bw = hsh->banks * hsh->cam_records_bw;
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_hasher.h b/drivers/net/ntnic/nthw/flow_api/flow_hasher.h
new file mode 100644
index 0000000000..15de8e9933
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_hasher.h
@@ -0,0 +1,21 @@ 
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_HASHER_H_
+#define _FLOW_HASHER_H_
+
+#include <stdint.h>
+
+struct hasher_s {
+	int banks;
+	int cam_records_bw;
+	uint32_t cam_records_bw_mask;
+	int cam_bw;
+};
+
+int init_hasher(struct hasher_s *hsh, int _banks, int nb_records);
+uint32_t gethash(struct hasher_s *hsh, const uint32_t key[16], int *result);
+
+#endif	/* _FLOW_HASHER_H_ */
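
The hasher is consumed by the KM code later in this patch (see flow_km.c below); the following is a
condensed, self-contained sketch of the call pattern. The bank/record geometry and the key words
are chosen purely for illustration; in the driver they come from be->km.nb_cam_banks,
be->km.nb_cam_records and the KM match words of a flow.

#include <inttypes.h>
#include <stdio.h>
#include <string.h>

#include "flow_hasher.h"

#define EXAMPLE_BANKS   3	/* placeholder CAM geometry */
#define EXAMPLE_RECORDS 2048

int main(void)
{
	struct hasher_s hsh;
	uint32_t key[16];
	int record_idx[EXAMPLE_BANKS];

	/* derive record-index bit width and mask from the CAM geometry */
	init_hasher(&hsh, EXAMPLE_BANKS, EXAMPLE_RECORDS);

	memset(key, 0, sizeof(key));
	key[0] = 0xc0a80001;	/* e.g. an IPv4 address */
	key[1] = 0x00500035;	/* e.g. a port pair */

	/* one hash value yields one candidate record index per CAM bank */
	uint32_t h = gethash(&hsh, key, record_idx);

	printf("hash=0x%08" PRIX32 "\n", h);
	for (int i = 0; i < EXAMPLE_BANKS; i++)
		printf("bank %d -> record %d\n", i, record_idx[i]);
	return 0;
}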
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_km.c b/drivers/net/ntnic/nthw/flow_api/flow_km.c
index 30d6ea728e..f79919cb81 100644
--- a/drivers/net/ntnic/nthw/flow_api/flow_km.c
+++ b/drivers/net/ntnic/nthw/flow_api/flow_km.c
@@ -9,6 +9,7 @@ 
 #include "hw_mod_backend.h"
 #include "flow_api_engine.h"
 #include "nt_util.h"
+#include "flow_hasher.h"
 
 #define MAX_QWORDS 2
 #define MAX_SWORDS 2
@@ -75,10 +76,25 @@  static int tcam_find_mapping(struct km_flow_def_s *km);
 
 void km_attach_ndev_resource_management(struct km_flow_def_s *km, void **handle)
 {
+	/*
+	 * KM entries occupied in CAM - to manage the cuckoo shuffling
+	 * and manage CAM population and usage
+	 * KM entries occupied in TCAM - to manage population and usage
+	 */
+	if (!*handle) {
+		*handle = calloc(1,
+			(size_t)CAM_ENTRIES + sizeof(uint32_t) + (size_t)TCAM_ENTRIES +
+			sizeof(struct hasher_s));
+		NT_LOG(DBG, FILTER, "Allocate NIC DEV CAM and TCAM record manager");
+	}
+
 	km->cam_dist = (struct cam_distrib_s *)*handle;
 	km->cuckoo_moves = (uint32_t *)((char *)km->cam_dist + CAM_ENTRIES);
 	km->tcam_dist =
 		(struct tcam_distrib_s *)((char *)km->cam_dist + CAM_ENTRIES + sizeof(uint32_t));
+
+	km->hsh = (struct hasher_s *)((char *)km->tcam_dist + TCAM_ENTRIES);
+	init_hasher(km->hsh, km->be->km.nb_cam_banks, km->be->km.nb_cam_records);
 }
 
 void km_free_ndev_resource_management(void **handle)
@@ -839,9 +855,18 @@  static int move_cuckoo_index_level(struct km_flow_def_s *km_parent, int bank_idx
 static int km_write_data_to_cam(struct km_flow_def_s *km)
 {
 	int res = 0;
+	int val[MAX_BANKS];
 	assert(km->be->km.nb_cam_banks <= MAX_BANKS);
 	assert(km->cam_dist);
 
+	/* word list without info set */
+	gethash(km->hsh, km->entry_word, val);
+
+	for (uint32_t i = 0; i < km->be->km.nb_cam_banks; i++) {
+		/* if paired we start always on an even address - reset bit 0 */
+		km->record_indexes[i] = (km->cam_paired) ? val[i] & ~1 : val[i];
+	}
+
 	NT_LOG(DBG, FILTER, "KM HASH [%03X, %03X, %03X]", km->record_indexes[0],
 		km->record_indexes[1], km->record_indexes[2]);
 
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c
index df5c00ac42..1750d09afb 100644
--- a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c
@@ -89,3 +89,182 @@  int hw_mod_hsh_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count
 
 	return be->iface->hsh_rcp_flush(be->be_dev, &be->hsh, start_idx, count);
 }
+
+static int hw_mod_hsh_rcp_mod(struct flow_api_backend_s *be, enum hw_hsh_e field, uint32_t index,
+	uint32_t word_off, uint32_t *value, int get)
+{
+	if (index >= be->hsh.nb_rcp) {
+		INDEX_TOO_LARGE_LOG;
+		return INDEX_TOO_LARGE;
+	}
+
+	switch (_VER_) {
+	case 5:
+		switch (field) {
+		case HW_HSH_RCP_PRESET_ALL:
+			if (get) {
+				UNSUP_FIELD_LOG;
+				return UNSUP_FIELD;
+			}
+
+			memset(&be->hsh.v5.rcp[index], (uint8_t)*value,
+				sizeof(struct hsh_v5_rcp_s));
+			break;
+
+		case HW_HSH_RCP_COMPARE:
+			if (!get) {
+				UNSUP_FIELD_LOG;
+				return UNSUP_FIELD;
+			}
+
+			if ((unsigned int)word_off >= be->hsh.nb_rcp) {
+				INDEX_TOO_LARGE_LOG;
+				return INDEX_TOO_LARGE;
+			}
+
+			DO_COMPARE_INDEXS(be->hsh.v5.rcp, struct hsh_v5_rcp_s, index, word_off);
+			break;
+
+		case HW_HSH_RCP_FIND:
+			if (!get) {
+				UNSUP_FIELD_LOG;
+				return UNSUP_FIELD;
+			}
+
+			if ((unsigned int)word_off >= be->hsh.nb_rcp) {
+				INDEX_TOO_LARGE_LOG;
+				return INDEX_TOO_LARGE;
+			}
+
+			FIND_EQUAL_INDEX(be->hsh.v5.rcp, struct hsh_v5_rcp_s, index, word_off,
+				be->hsh.nb_rcp);
+			break;
+
+		case HW_HSH_RCP_LOAD_DIST_TYPE:
+			GET_SET(be->hsh.v5.rcp[index].load_dist_type, value);
+			break;
+
+		case HW_HSH_RCP_MAC_PORT_MASK:
+			if (word_off > HSH_RCP_MAC_PORT_MASK_SIZE) {
+				WORD_OFF_TOO_LARGE_LOG;
+				return WORD_OFF_TOO_LARGE;
+			}
+
+			GET_SET(be->hsh.v5.rcp[index].mac_port_mask[word_off], value);
+			break;
+
+		case HW_HSH_RCP_SORT:
+			GET_SET(be->hsh.v5.rcp[index].sort, value);
+			break;
+
+		case HW_HSH_RCP_QW0_PE:
+			GET_SET(be->hsh.v5.rcp[index].qw0_pe, value);
+			break;
+
+		case HW_HSH_RCP_QW0_OFS:
+			GET_SET_SIGNED(be->hsh.v5.rcp[index].qw0_ofs, value);
+			break;
+
+		case HW_HSH_RCP_QW4_PE:
+			GET_SET(be->hsh.v5.rcp[index].qw4_pe, value);
+			break;
+
+		case HW_HSH_RCP_QW4_OFS:
+			GET_SET_SIGNED(be->hsh.v5.rcp[index].qw4_ofs, value);
+			break;
+
+		case HW_HSH_RCP_W8_PE:
+			GET_SET(be->hsh.v5.rcp[index].w8_pe, value);
+			break;
+
+		case HW_HSH_RCP_W8_OFS:
+			GET_SET_SIGNED(be->hsh.v5.rcp[index].w8_ofs, value);
+			break;
+
+		case HW_HSH_RCP_W8_SORT:
+			GET_SET(be->hsh.v5.rcp[index].w8_sort, value);
+			break;
+
+		case HW_HSH_RCP_W9_PE:
+			GET_SET(be->hsh.v5.rcp[index].w9_pe, value);
+			break;
+
+		case HW_HSH_RCP_W9_OFS:
+			GET_SET_SIGNED(be->hsh.v5.rcp[index].w9_ofs, value);
+			break;
+
+		case HW_HSH_RCP_W9_SORT:
+			GET_SET(be->hsh.v5.rcp[index].w9_sort, value);
+			break;
+
+		case HW_HSH_RCP_W9_P:
+			GET_SET(be->hsh.v5.rcp[index].w9_p, value);
+			break;
+
+		case HW_HSH_RCP_P_MASK:
+			GET_SET(be->hsh.v5.rcp[index].p_mask, value);
+			break;
+
+		case HW_HSH_RCP_WORD_MASK:
+			if (word_off > HSH_RCP_WORD_MASK_SIZE) {
+				WORD_OFF_TOO_LARGE_LOG;
+				return WORD_OFF_TOO_LARGE;
+			}
+
+			GET_SET(be->hsh.v5.rcp[index].word_mask[word_off], value);
+			break;
+
+		case HW_HSH_RCP_SEED:
+			GET_SET(be->hsh.v5.rcp[index].seed, value);
+			break;
+
+		case HW_HSH_RCP_TNL_P:
+			GET_SET(be->hsh.v5.rcp[index].tnl_p, value);
+			break;
+
+		case HW_HSH_RCP_HSH_VALID:
+			GET_SET(be->hsh.v5.rcp[index].hsh_valid, value);
+			break;
+
+		case HW_HSH_RCP_HSH_TYPE:
+			GET_SET(be->hsh.v5.rcp[index].hsh_type, value);
+			break;
+
+		case HW_HSH_RCP_TOEPLITZ:
+			GET_SET(be->hsh.v5.rcp[index].toeplitz, value);
+			break;
+
+		case HW_HSH_RCP_K:
+			if (word_off > HSH_RCP_KEY_SIZE) {
+				WORD_OFF_TOO_LARGE_LOG;
+				return WORD_OFF_TOO_LARGE;
+			}
+
+			GET_SET(be->hsh.v5.rcp[index].k[word_off], value);
+			break;
+
+		case HW_HSH_RCP_AUTO_IPV4_MASK:
+			GET_SET(be->hsh.v5.rcp[index].auto_ipv4_mask, value);
+			break;
+
+		default:
+			UNSUP_FIELD_LOG;
+			return UNSUP_FIELD;
+		}
+
+		break;
+
+	/* end case 5 */
+	default:
+		UNSUP_VER_LOG;
+		return UNSUP_VER;
+	}
+
+	return 0;
+}
+
+int hw_mod_hsh_rcp_set(struct flow_api_backend_s *be, enum hw_hsh_e field, uint32_t index,
+	uint32_t word_off, uint32_t value)
+{
+	return hw_mod_hsh_rcp_mod(be, field, index, word_off, &value, 0);
+}
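
For orientation, a condensed sketch of the write path these setters feed: fields are written into
the shadow recipe and then flushed to hardware. The recipe index and key words below are
placeholders; real recipes are built by flow_nic_set_hasher() and
flow_nic_set_hasher_fields_inline() further down in this series.

/* Sketch only: program HSH recipe 1 for Toeplitz hashing and flush it. */
static int example_program_toeplitz_rcp(struct flow_api_backend_s *be)
{
	const int idx = 1;
	int res = 0;

	/* start from a clean recipe */
	res |= hw_mod_hsh_rcp_set(be, HW_HSH_RCP_PRESET_ALL, idx, 0, 0);
	/* enable hashing and select the Toeplitz algorithm */
	res |= hw_mod_hsh_rcp_set(be, HW_HSH_RCP_LOAD_DIST_TYPE, idx, 0, 2);
	res |= hw_mod_hsh_rcp_set(be, HW_HSH_RCP_TOEPLITZ, idx, 0, 1);

	/* load the 320-bit key as ten 32-bit words; placeholder values here,
	 * the byte ordering actually used by the driver is shown in
	 * flow_nic_set_hasher_fields_inline() */
	for (uint32_t w = 0; w < 10; w++)
		res |= hw_mod_hsh_rcp_set(be, HW_HSH_RCP_K, idx, w, 0x01020304 + w);

	res |= hw_mod_hsh_rcp_set(be, HW_HSH_RCP_HSH_VALID, idx, 0, 1);

	/* write the shadow recipe to the FPGA */
	res |= hw_mod_hsh_rcp_flush(be, idx, 1);
	return res;
}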
diff --git a/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_hw_db_inline.c b/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_hw_db_inline.c
index 4737460cdf..068c890b45 100644
--- a/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_hw_db_inline.c
+++ b/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_hw_db_inline.c
@@ -30,9 +30,15 @@  struct hw_db_inline_resource_db {
 		int ref;
 	} *slc_lr;
 
+	struct hw_db_inline_resource_db_hsh {
+		struct hw_db_inline_hsh_data data;
+		int ref;
+	} *hsh;
+
 	uint32_t nb_cot;
 	uint32_t nb_qsl;
 	uint32_t nb_slc_lr;
+	uint32_t nb_hsh;
 
 	/* Items */
 	struct hw_db_inline_resource_db_cat {
@@ -122,6 +128,21 @@  int hw_db_inline_create(struct flow_nic_dev *ndev, void **db_handle)
 		}
 	}
 
+	db->cfn = calloc(db->nb_cat, sizeof(struct hw_db_inline_resource_db_cfn));
+
+	if (db->cfn == NULL) {
+		hw_db_inline_destroy(db);
+		return -1;
+	}
+
+	db->nb_hsh = ndev->be.hsh.nb_rcp;
+	db->hsh = calloc(db->nb_hsh, sizeof(struct hw_db_inline_resource_db_hsh));
+
+	if (db->hsh == NULL) {
+		hw_db_inline_destroy(db);
+		return -1;
+	}
+
 	*db_handle = db;
 	return 0;
 }
@@ -133,6 +154,8 @@  void hw_db_inline_destroy(void *db_handle)
 	free(db->cot);
 	free(db->qsl);
 	free(db->slc_lr);
+	free(db->hsh);
+
 	free(db->cat);
 
 	if (db->km) {
@@ -180,6 +203,10 @@  void hw_db_inline_deref_idxs(struct flow_nic_dev *ndev, void *db_handle, struct
 			hw_db_inline_km_ft_deref(ndev, db_handle, *(struct hw_db_km_ft *)&idxs[i]);
 			break;
 
+		case HW_DB_IDX_TYPE_HSH:
+			hw_db_inline_hsh_deref(ndev, db_handle, *(struct hw_db_hsh_idx *)&idxs[i]);
+			break;
+
 		default:
 			break;
 		}
@@ -219,6 +246,9 @@  const void *hw_db_inline_find_data(struct flow_nic_dev *ndev, void *db_handle,
 		case HW_DB_IDX_TYPE_KM_FT:
 			return NULL;	/* FTs can't be easily looked up */
 
+		case HW_DB_IDX_TYPE_HSH:
+			return &db->hsh[idxs[i].ids].data;
+
 		default:
 			return NULL;
 		}
@@ -247,6 +277,7 @@  int hw_db_inline_setup_mbr_filter(struct flow_nic_dev *ndev, uint32_t cat_hw_id,
 {
 	(void)ft;
 	(void)qsl_hw_id;
+	(void)ft;
 
 	const int offset = ((int)ndev->be.cat.cts_num + 1) / 2;
 	(void)offset;
@@ -848,3 +879,114 @@  void hw_db_inline_km_ft_deref(struct flow_nic_dev *ndev, void *db_handle, struct
 		km_rcp->ft[cat_offset + idx.id1].ref = 0;
 	}
 }
+
+/******************************************************************************/
+/* HSH                                                                        */
+/******************************************************************************/
+
+static int hw_db_inline_hsh_compare(const struct hw_db_inline_hsh_data *data1,
+	const struct hw_db_inline_hsh_data *data2)
+{
+	for (uint32_t i = 0; i < MAX_RSS_KEY_LEN; ++i)
+		if (data1->key[i] != data2->key[i])
+			return 0;
+
+	return data1->func == data2->func && data1->hash_mask == data2->hash_mask;
+}
+
+struct hw_db_hsh_idx hw_db_inline_hsh_add(struct flow_nic_dev *ndev, void *db_handle,
+	const struct hw_db_inline_hsh_data *data)
+{
+	struct hw_db_inline_resource_db *db = (struct hw_db_inline_resource_db *)db_handle;
+	struct hw_db_hsh_idx idx = { .raw = 0 };
+	int found = 0;
+
+	idx.type = HW_DB_IDX_TYPE_HSH;
+
+	/* check if default hash configuration shall be used, i.e. rss_hf is not set */
+	/*
+	 * NOTE: hsh id 0 is reserved for "default"
+	 * HSH used by port configuration; All ports share the same default hash settings.
+	 */
+	if (data->hash_mask == 0) {
+		idx.ids = 0;
+		hw_db_inline_hsh_ref(ndev, db, idx);
+		return idx;
+	}
+
+	for (uint32_t i = 1; i < db->nb_hsh; ++i) {
+		int ref = db->hsh[i].ref;
+
+		if (ref > 0 && hw_db_inline_hsh_compare(data, &db->hsh[i].data)) {
+			idx.ids = i;
+			hw_db_inline_hsh_ref(ndev, db, idx);
+			return idx;
+		}
+
+		if (!found && ref <= 0) {
+			found = 1;
+			idx.ids = i;
+		}
+	}
+
+	if (!found) {
+		idx.error = 1;
+		return idx;
+	}
+
+	struct nt_eth_rss_conf tmp_rss_conf;
+
+	tmp_rss_conf.rss_hf = data->hash_mask;
+	memcpy(tmp_rss_conf.rss_key, data->key, MAX_RSS_KEY_LEN);
+	tmp_rss_conf.algorithm = data->func;
+	int res = flow_nic_set_hasher_fields(ndev, idx.ids, tmp_rss_conf);
+
+	if (res != 0) {
+		idx.error = 1;
+		return idx;
+	}
+
+	db->hsh[idx.ids].ref = 1;
+	memcpy(&db->hsh[idx.ids].data, data, sizeof(struct hw_db_inline_hsh_data));
+	flow_nic_mark_resource_used(ndev, RES_HSH_RCP, idx.ids);
+
+	hw_mod_hsh_rcp_flush(&ndev->be, idx.ids, 1);
+
+	return idx;
+}
+
+void hw_db_inline_hsh_ref(struct flow_nic_dev *ndev, void *db_handle, struct hw_db_hsh_idx idx)
+{
+	(void)ndev;
+	struct hw_db_inline_resource_db *db = (struct hw_db_inline_resource_db *)db_handle;
+
+	if (!idx.error)
+		db->hsh[idx.ids].ref += 1;
+}
+
+void hw_db_inline_hsh_deref(struct flow_nic_dev *ndev, void *db_handle, struct hw_db_hsh_idx idx)
+{
+	struct hw_db_inline_resource_db *db = (struct hw_db_inline_resource_db *)db_handle;
+
+	if (idx.error)
+		return;
+
+	db->hsh[idx.ids].ref -= 1;
+
+	if (db->hsh[idx.ids].ref <= 0) {
+		/*
+		 * NOTE: hsh id 0 is reserved for "default" HSH used by
+		 * port configuration, so we shall keep it even if
+		 * it is not used by any flow
+		 */
+		if (idx.ids > 0) {
+			hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_PRESET_ALL, idx.ids, 0, 0x0);
+			hw_mod_hsh_rcp_flush(&ndev->be, idx.ids, 1);
+
+			memset(&db->hsh[idx.ids].data, 0x0, sizeof(struct hw_db_inline_hsh_data));
+			flow_nic_free_resource(ndev, RES_HSH_RCP, idx.ids);
+		}
+
+		db->hsh[idx.ids].ref = 0;
+	}
+}
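
A hedged sketch of the reference-counted lifecycle these helpers implement; the ndev/db_handle
arguments and the hw_db_inline_hsh_data fields are taken from this patch, the values and the
function name are illustrative only.

/* Illustrative only: acquire a HSH recipe for a flow and release it again. */
static void example_hsh_lifecycle(struct flow_nic_dev *ndev, void *db_handle)
{
	struct hw_db_inline_hsh_data data = {
		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
		.hash_mask = RTE_ETH_RSS_IP,	/* rss_hf requested by the flow */
		/* .key left zeroed in this sketch */
	};
	struct hw_db_hsh_idx idx = hw_db_inline_hsh_add(ndev, db_handle, &data);

	if (idx.error)
		return;	/* no free recipe or recipe programming failed */

	/* ... idx.raw is stored in the flow's db_idxs / action set ... */

	hw_db_inline_hsh_deref(ndev, db_handle, idx);	/* released when ref count reaches 0 */
}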
diff --git a/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_hw_db_inline.h b/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_hw_db_inline.h
index e104ba7327..c97bdef1b7 100644
--- a/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_hw_db_inline.h
+++ b/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_hw_db_inline.h
@@ -60,6 +60,10 @@  struct hw_db_km_ft {
 	HW_DB_IDX;
 };
 
+struct hw_db_hsh_idx {
+	HW_DB_IDX;
+};
+
 enum hw_db_idx_type {
 	HW_DB_IDX_TYPE_NONE = 0,
 	HW_DB_IDX_TYPE_COT,
@@ -68,6 +72,7 @@  enum hw_db_idx_type {
 	HW_DB_IDX_TYPE_SLC_LR,
 	HW_DB_IDX_TYPE_KM_RCP,
 	HW_DB_IDX_TYPE_KM_FT,
+	HW_DB_IDX_TYPE_HSH,
 };
 
 /* Functionality data types */
@@ -133,6 +138,7 @@  struct hw_db_inline_action_set_data {
 		struct {
 			struct hw_db_cot_idx cot;
 			struct hw_db_qsl_idx qsl;
+			struct hw_db_hsh_idx hsh;
 		};
 	};
 };
@@ -175,6 +181,11 @@  void hw_db_inline_slc_lr_ref(struct flow_nic_dev *ndev, void *db_handle,
 void hw_db_inline_slc_lr_deref(struct flow_nic_dev *ndev, void *db_handle,
 	struct hw_db_slc_lr_idx idx);
 
+struct hw_db_hsh_idx hw_db_inline_hsh_add(struct flow_nic_dev *ndev, void *db_handle,
+	const struct hw_db_inline_hsh_data *data);
+void hw_db_inline_hsh_ref(struct flow_nic_dev *ndev, void *db_handle, struct hw_db_hsh_idx idx);
+void hw_db_inline_hsh_deref(struct flow_nic_dev *ndev, void *db_handle, struct hw_db_hsh_idx idx);
+
 /**/
 
 struct hw_db_cat_idx hw_db_inline_cat_add(struct flow_nic_dev *ndev, void *db_handle,
diff --git a/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_profile_inline.c b/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_profile_inline.c
index bf6cbcf37d..8ba100edd7 100644
--- a/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_profile_inline.c
+++ b/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_profile_inline.c
@@ -25,6 +25,15 @@ 
 #define NT_VIOLATING_MBR_CFN 0
 #define NT_VIOLATING_MBR_QSL 1
 
+#define RTE_ETH_RSS_UDP_COMBINED                                                                  \
+	(RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_IPV6_UDP_EX)
+
+#define RTE_ETH_RSS_TCP_COMBINED                                                                  \
+	(RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_IPV6_TCP_EX)
+
+#define NT_FLM_OP_UNLEARN 0
+#define NT_FLM_OP_LEARN 1
+
 static void *flm_lrn_queue_arr;
 
 static int rx_queue_idx_to_hw_id(const struct flow_eth_dev *dev, int id)
@@ -2322,10 +2331,27 @@  static void setup_db_qsl_data(struct nic_flow_def *fd, struct hw_db_inline_qsl_d
 	}
 }
 
+static void setup_db_hsh_data(struct nic_flow_def *fd, struct hw_db_inline_hsh_data *hsh_data)
+{
+	memset(hsh_data, 0x0, sizeof(struct hw_db_inline_hsh_data));
+
+	hsh_data->func = fd->hsh.func;
+	hsh_data->hash_mask = fd->hsh.types;
+
+	if (fd->hsh.key != NULL) {
+		/*
+		 * Just a safeguard. Check and error handling of rss_key_len
+		 * shall be done at api layers above.
+		 */
+		memcpy(&hsh_data->key, fd->hsh.key,
+			fd->hsh.key_len < MAX_RSS_KEY_LEN ? fd->hsh.key_len : MAX_RSS_KEY_LEN);
+	}
+}
+
 static int setup_flow_flm_actions(struct flow_eth_dev *dev,
 	const struct nic_flow_def *fd,
 	const struct hw_db_inline_qsl_data *qsl_data,
-	const struct hw_db_inline_hsh_data *hsh_data __rte_unused,
+	const struct hw_db_inline_hsh_data *hsh_data,
 	uint32_t group __rte_unused,
 	uint32_t local_idxs[],
 	uint32_t *local_idx_counter,
@@ -2362,6 +2388,17 @@  static int setup_flow_flm_actions(struct flow_eth_dev *dev,
 		return -1;
 	}
 
+	/* Setup HSH */
+	struct hw_db_hsh_idx hsh_idx =
+		hw_db_inline_hsh_add(dev->ndev, dev->ndev->hw_db_handle, hsh_data);
+	local_idxs[(*local_idx_counter)++] = hsh_idx.raw;
+
+	if (hsh_idx.error) {
+		NT_LOG(ERR, FILTER, "Could not reference HSH resource");
+		flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+		return -1;
+	}
+
 	/* Setup SLC LR */
 	struct hw_db_slc_lr_idx slc_lr_idx = { .raw = 0 };
 
@@ -2405,6 +2442,7 @@  static struct flow_handle *create_flow_filter(struct flow_eth_dev *dev, struct n
 	setup_db_qsl_data(fd, &qsl_data, num_dest_port, num_queues);
 
 	struct hw_db_inline_hsh_data hsh_data;
+	setup_db_hsh_data(fd, &hsh_data);
 
 	if (attr->group > 0 && fd_has_empty_pattern(fd)) {
 		/*
@@ -2488,6 +2526,19 @@  static struct flow_handle *create_flow_filter(struct flow_eth_dev *dev, struct n
 				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
 				goto error_out;
 			}
+
+			/* Setup HSH */
+			struct hw_db_hsh_idx hsh_idx =
+				hw_db_inline_hsh_add(dev->ndev, dev->ndev->hw_db_handle,
+				&hsh_data);
+			fh->db_idxs[fh->db_idx_counter++] = hsh_idx.raw;
+			action_set_data.hsh = hsh_idx;
+
+			if (hsh_idx.error) {
+				NT_LOG(ERR, FILTER, "Could not reference HSH resource");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+				goto error_out;
+			}
 		}
 
 		/* Setup CAT */
@@ -2667,6 +2718,122 @@  static struct flow_handle *create_flow_filter(struct flow_eth_dev *dev, struct n
 	return NULL;
 }
 
+/*
+ * FPGA uses up to 10 32-bit words (320 bits) for hash calculation + 8 bits for L4 protocol number.
+ * Hashed data are split between two 128-bit Quad Words (QW)
+ * and two 32-bit Words (W), which can refer to different header parts.
+ */
+enum hsh_words_id {
+	HSH_WORDS_QW0 = 0,
+	HSH_WORDS_QW4,
+	HSH_WORDS_W8,
+	HSH_WORDS_W9,
+	HSH_WORDS_SIZE,
+};
+
+/* struct with details about hash QWs & Ws */
+struct hsh_words {
+	/*
+	 * index of W (word) or index of 1st word of QW (quad word)
+	 * is used for hash mask calculation
+	 */
+	uint8_t index;
+	uint8_t toeplitz_index;	/* offset in Bytes of given [Q]W inside Toeplitz RSS key */
+	enum hw_hsh_e pe;	/* offset to header part, e.g. beginning of L4 */
+	enum hw_hsh_e ofs;	/* relative offset in BYTES to 'pe' header offset above */
+	uint16_t bit_len;	/* max length of header part in bits to fit into QW/W */
+	bool free;	/* only free words can be used for hsh calculation */
+};
+
+static enum hsh_words_id get_free_word(struct hsh_words *words, uint16_t bit_len)
+{
+	enum hsh_words_id ret = HSH_WORDS_SIZE;
+	uint16_t ret_bit_len = UINT16_MAX;
+
+	for (enum hsh_words_id i = HSH_WORDS_QW0; i < HSH_WORDS_SIZE; i++) {
+		if (words[i].free && bit_len <= words[i].bit_len &&
+			words[i].bit_len < ret_bit_len) {
+			ret = i;
+			ret_bit_len = words[i].bit_len;
+		}
+	}
+
+	return ret;
+}
+
+static int flow_nic_set_hasher_part_inline(struct flow_nic_dev *ndev, int hsh_idx,
+	struct hsh_words *words, uint32_t pe, uint32_t ofs,
+	int bit_len, bool toeplitz)
+{
+	int res = 0;
+
+	/* check if there is any free word, which can accommodate header part of given 'bit_len' */
+	enum hsh_words_id word = get_free_word(words, bit_len);
+
+	if (word == HSH_WORDS_SIZE) {
+		NT_LOG(ERR, FILTER, "Cannot add additional %d bits into hash", bit_len);
+		return -1;
+	}
+
+	words[word].free = false;
+
+	res |= hw_mod_hsh_rcp_set(&ndev->be, words[word].pe, hsh_idx, 0, pe);
+	NT_LOG(DBG, FILTER, "hw_mod_hsh_rcp_set(&ndev->be, %d, %d, 0, %d)", words[word].pe,
+		hsh_idx, pe);
+	res |= hw_mod_hsh_rcp_set(&ndev->be, words[word].ofs, hsh_idx, 0, ofs);
+	NT_LOG(DBG, FILTER, "hw_mod_hsh_rcp_set(&ndev->be, %d, %d, 0, %d)", words[word].ofs,
+		hsh_idx, ofs);
+
+	/* set HW_HSH_RCP_WORD_MASK based on used QW/W and given 'bit_len' */
+	int mask_bit_len = bit_len;
+	uint32_t mask = 0x0;
+	uint32_t mask_be = 0x0;
+	uint32_t toeplitz_mask[9] = { 0x0 };
+	/* iterate through all words of QW */
+	uint16_t words_count = words[word].bit_len / 32;
+
+	for (uint16_t mask_off = 1; mask_off <= words_count; mask_off++) {
+		if (mask_bit_len >= 32) {
+			mask_bit_len -= 32;
+			mask = 0xffffffff;
+			mask_be = mask;
+
+		} else if (mask_bit_len > 0) {
+			/* keep bits from left to right, i.e. little to big endian */
+			mask_be = 0xffffffff >> (32 - mask_bit_len);
+			mask = mask_be << (32 - mask_bit_len);
+			mask_bit_len = 0;
+
+		} else {
+			mask = 0x0;
+			mask_be = 0x0;
+		}
+
+		/* reorder QW words mask from little to big endian */
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx,
+			words[word].index + words_count - mask_off, mask);
+		NT_LOG(DBG, FILTER,
+			"hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, %d, %d, 0x%" PRIX32
+			")",
+			hsh_idx, words[word].index + words_count - mask_off, mask);
+		toeplitz_mask[words[word].toeplitz_index + mask_off - 1] = mask_be;
+	}
+
+	if (toeplitz) {
+		NT_LOG(DBG, FILTER,
+			"Partial Toeplitz RSS key mask: %08" PRIX32 " %08" PRIX32 " %08" PRIX32
+			" %08" PRIX32 " %08" PRIX32 " %08" PRIX32 " %08" PRIX32 " %08" PRIX32
+			" %08" PRIX32 "",
+			toeplitz_mask[8], toeplitz_mask[7], toeplitz_mask[6], toeplitz_mask[5],
+			toeplitz_mask[4], toeplitz_mask[3], toeplitz_mask[2], toeplitz_mask[1],
+			toeplitz_mask[0]);
+		NT_LOG(DBG, FILTER,
+			"                               MSB                                                                          LSB");
+	}
+
+	return res;
+}
+
 /*
  * Public functions
  */
@@ -2717,6 +2884,12 @@  int initialize_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev)
 
 		flow_nic_mark_resource_used(ndev, RES_PDB_RCP, 0);
 
+		/* Set default hasher recipe to 5-tuple */
+		flow_nic_set_hasher(ndev, 0, HASH_ALGO_5TUPLE);
+		hw_mod_hsh_rcp_flush(&ndev->be, 0, 1);
+
+		flow_nic_mark_resource_used(ndev, RES_HSH_RCP, 0);
+
 		/* Setup filter using matching all packets violating traffic policing parameters */
 		flow_nic_mark_resource_used(ndev, RES_CAT_CFN, NT_VIOLATING_MBR_CFN);
 		flow_nic_mark_resource_used(ndev, RES_QSL_RCP, NT_VIOLATING_MBR_QSL);
@@ -2783,6 +2956,10 @@  int done_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev)
 		hw_mod_pdb_rcp_flush(&ndev->be, 0, 1);
 		flow_nic_free_resource(ndev, RES_PDB_RCP, 0);
 
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_PRESET_ALL, 0, 0, 0);
+		hw_mod_hsh_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_HSH_RCP, 0);
+
 		hw_db_inline_destroy(ndev->hw_db_handle);
 
 #ifdef FLOW_DEBUG
@@ -2980,6 +3157,672 @@  int flow_destroy_profile_inline(struct flow_eth_dev *dev, struct flow_handle *fl
 	return err;
 }
 
+static __rte_always_inline bool all_bits_enabled(uint64_t hash_mask, uint64_t hash_bits)
+{
+	return (hash_mask & hash_bits) == hash_bits;
+}
+
+static __rte_always_inline void unset_bits(uint64_t *hash_mask, uint64_t hash_bits)
+{
+	*hash_mask &= ~hash_bits;
+}
+
+static __rte_always_inline void unset_bits_and_log(uint64_t *hash_mask, uint64_t hash_bits)
+{
+	char rss_buffer[4096];
+	uint16_t rss_buffer_len = sizeof(rss_buffer);
+
+	if (sprint_nt_rss_mask(rss_buffer, rss_buffer_len, " ", *hash_mask & hash_bits) == 0)
+		NT_LOG(DBG, FILTER, "Configured RSS types:%s", rss_buffer);
+
+	unset_bits(hash_mask, hash_bits);
+}
+
+static __rte_always_inline void unset_bits_if_all_enabled(uint64_t *hash_mask, uint64_t hash_bits)
+{
+	if (all_bits_enabled(*hash_mask, hash_bits))
+		unset_bits(hash_mask, hash_bits);
+}
+
+int flow_nic_set_hasher_fields_inline(struct flow_nic_dev *ndev, int hsh_idx,
+	struct nt_eth_rss_conf rss_conf)
+{
+	uint64_t fields = rss_conf.rss_hf;
+
+	char rss_buffer[4096];
+	uint16_t rss_buffer_len = sizeof(rss_buffer);
+
+	if (sprint_nt_rss_mask(rss_buffer, rss_buffer_len, " ", fields) == 0)
+		NT_LOG(DBG, FILTER, "Requested RSS types:%s", rss_buffer);
+
+	/*
+	 * configure all (Q)Words usable for hash calculation
+	 * Hash can be calculated from 4 independent header parts:
+	 *      | QW0           | Qw4           | W8| W9|
+	 * word | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
+	 */
+	struct hsh_words words[HSH_WORDS_SIZE] = {
+		{ 0, 5, HW_HSH_RCP_QW0_PE, HW_HSH_RCP_QW0_OFS, 128, true },
+		{ 4, 1, HW_HSH_RCP_QW4_PE, HW_HSH_RCP_QW4_OFS, 128, true },
+		{ 8, 0, HW_HSH_RCP_W8_PE, HW_HSH_RCP_W8_OFS, 32, true },
+		{
+			9, 255, HW_HSH_RCP_W9_PE, HW_HSH_RCP_W9_OFS, 32,
+			true
+		},	/* not supported for Toeplitz */
+	};
+
+	int res = 0;
+	res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_PRESET_ALL, hsh_idx, 0, 0);
+	/* enable hashing */
+	res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_LOAD_DIST_TYPE, hsh_idx, 0, 2);
+
+	/* configure selected hash function and its key */
+	bool toeplitz = false;
+
+	switch (rss_conf.algorithm) {
+	case RTE_ETH_HASH_FUNCTION_DEFAULT:
+		/* Use default NTH10 hashing algorithm */
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_TOEPLITZ, hsh_idx, 0, 0);
+		/* Use 1st 32-bits from rss_key to configure NTH10 SEED */
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_SEED, hsh_idx, 0,
+			rss_conf.rss_key[0] << 24 | rss_conf.rss_key[1] << 16 |
+			rss_conf.rss_key[2] << 8 | rss_conf.rss_key[3]);
+		break;
+
+	case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
+		toeplitz = true;
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_TOEPLITZ, hsh_idx, 0, 1);
+		uint8_t empty_key = 0;
+
+		/* Toeplitz key (always 40B) must be encoded from little to big endian */
+		for (uint8_t i = 0; i <= (MAX_RSS_KEY_LEN - 8); i += 8) {
+			res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_K, hsh_idx, i / 4,
+				rss_conf.rss_key[i + 4] << 24 |
+				rss_conf.rss_key[i + 5] << 16 |
+				rss_conf.rss_key[i + 6] << 8 |
+				rss_conf.rss_key[i + 7]);
+			NT_LOG(DBG, FILTER,
+				"hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_K, %d, %d, 0x%" PRIX32
+				")",
+				hsh_idx, i / 4,
+				rss_conf.rss_key[i + 4] << 24 | rss_conf.rss_key[i + 5] << 16 |
+				rss_conf.rss_key[i + 6] << 8 | rss_conf.rss_key[i + 7]);
+			res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_K, hsh_idx, i / 4 + 1,
+				rss_conf.rss_key[i] << 24 |
+				rss_conf.rss_key[i + 1] << 16 |
+				rss_conf.rss_key[i + 2] << 8 |
+				rss_conf.rss_key[i + 3]);
+			NT_LOG(DBG, FILTER,
+				"hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_K, %d, %d, 0x%" PRIX32
+				")",
+				hsh_idx, i / 4 + 1,
+				rss_conf.rss_key[i] << 24 | rss_conf.rss_key[i + 1] << 16 |
+				rss_conf.rss_key[i + 2] << 8 | rss_conf.rss_key[i + 3]);
+			empty_key |= rss_conf.rss_key[i] | rss_conf.rss_key[i + 1] |
+				rss_conf.rss_key[i + 2] | rss_conf.rss_key[i + 3] |
+				rss_conf.rss_key[i + 4] | rss_conf.rss_key[i + 5] |
+				rss_conf.rss_key[i + 6] | rss_conf.rss_key[i + 7];
+		}
+
+		if (empty_key == 0) {
+			NT_LOG(ERR, FILTER,
+				"Toeplitz key must be configured. Key with all bytes set to zero is not allowed.");
+			return -1;
+		}
+
+		words[HSH_WORDS_W9].free = false;
+		NT_LOG(DBG, FILTER,
+			"Toeplitz hashing is enabled thus W9 and P_MASK cannot be used.");
+		break;
+
+	default:
+		NT_LOG(ERR, FILTER, "Unknown hashing function %d requested", rss_conf.algorithm);
+		return -1;
+	}
+
+	/* indication that some IPv6 flag is present */
+	bool ipv6 = fields & (NT_ETH_RSS_IPV6_MASK);
+	/* store proto mask for later use at IP and L4 checksum handling */
+	uint64_t l4_proto_mask = fields &
+		(RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+		RTE_ETH_RSS_NONFRAG_IPV4_SCTP | RTE_ETH_RSS_NONFRAG_IPV4_OTHER |
+		RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+		RTE_ETH_RSS_NONFRAG_IPV6_SCTP | RTE_ETH_RSS_NONFRAG_IPV6_OTHER |
+		RTE_ETH_RSS_IPV6_TCP_EX | RTE_ETH_RSS_IPV6_UDP_EX);
+
+	/* outermost headers are used by default, so innermost bit takes precedence if detected */
+	bool outer = (fields & RTE_ETH_RSS_LEVEL_INNERMOST) ? false : true;
+	unset_bits(&fields, RTE_ETH_RSS_LEVEL_MASK);
+
+	if (fields == 0) {
+		NT_LOG(ERR, FILTER, "RSS hash configuration 0x%" PRIX64 " is not valid.",
+			rss_conf.rss_hf);
+		return -1;
+	}
+
+	/* indication that IPv4 `protocol` or IPv6 `next header` fields shall be part of the hash
+	 */
+	bool l4_proto_hash = false;
+
+	/*
+	 * check if SRC_ONLY & DST_ONLY are used simultaneously;
+	 * According to DPDK, we shall behave like none of these bits is set
+	 */
+	unset_bits_if_all_enabled(&fields, RTE_ETH_RSS_L2_SRC_ONLY | RTE_ETH_RSS_L2_DST_ONLY);
+	unset_bits_if_all_enabled(&fields, RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY);
+	unset_bits_if_all_enabled(&fields, RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY);
+
+	/* L2 */
+	if (fields & (RTE_ETH_RSS_ETH | RTE_ETH_RSS_L2_SRC_ONLY | RTE_ETH_RSS_L2_DST_ONLY)) {
+		if (outer) {
+			if (fields & RTE_ETH_RSS_L2_SRC_ONLY) {
+				NT_LOG(DBG, FILTER, "Set outer src MAC hasher.");
+				res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, words,
+					DYN_L2, 6, 48, toeplitz);
+
+			} else if (fields & RTE_ETH_RSS_L2_DST_ONLY) {
+				NT_LOG(DBG, FILTER, "Set outer dst MAC hasher.");
+				res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, words,
+					DYN_L2, 0, 48, toeplitz);
+
+			} else {
+				NT_LOG(DBG, FILTER, "Set outer src & dst MAC hasher.");
+				res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, words,
+					DYN_L2, 0, 96, toeplitz);
+			}
+
+		} else if (fields & RTE_ETH_RSS_L2_SRC_ONLY) {
+			NT_LOG(DBG, FILTER, "Set inner src MAC hasher.");
+			res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, words, DYN_TUN_L2, 6,
+				48, toeplitz);
+
+		} else if (fields & RTE_ETH_RSS_L2_DST_ONLY) {
+			NT_LOG(DBG, FILTER, "Set inner dst MAC hasher.");
+			res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, words, DYN_TUN_L2, 0,
+				48, toeplitz);
+
+		} else {
+			NT_LOG(DBG, FILTER, "Set inner src & dst MAC hasher.");
+			res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, words, DYN_TUN_L2, 0,
+				96, toeplitz);
+		}
+
+		unset_bits_and_log(&fields,
+			RTE_ETH_RSS_ETH | RTE_ETH_RSS_L2_SRC_ONLY |
+			RTE_ETH_RSS_L2_DST_ONLY);
+	}
+
+	/*
+	 * VLAN support of multiple VLAN headers,
+	 * where S-VLAN is the first and C-VLAN the last VLAN header
+	 */
+	if (fields & RTE_ETH_RSS_C_VLAN) {
+		/*
+		 * use MPLS protocol offset, which points just after ethertype with relative
+		 * offset -6 (i.e. 2 bytes
+		 * of ethertype & size + 4 bytes of VLAN header field) to access last vlan header
+		 */
+		if (outer) {
+			NT_LOG(DBG, FILTER, "Set outer C-VLAN hasher.");
+			/*
+			 * use whole 32-bit 802.1a tag - backward compatible
+			 * with VSWITCH implementation
+			 */
+			res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, words, DYN_MPLS, -6,
+				32, toeplitz);
+
+		} else {
+			NT_LOG(DBG, FILTER, "Set inner C-VLAN hasher.");
+			/*
+			 * use whole 32-bit 802.1a tag - backward compatible
+			 * with VSWITCH implementation
+			 */
+			res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, words, DYN_TUN_MPLS,
+				-6, 32, toeplitz);
+		}
+
+		unset_bits_and_log(&fields, RTE_ETH_RSS_C_VLAN);
+	}
+
+	if (fields & RTE_ETH_RSS_S_VLAN) {
+		if (outer) {
+			NT_LOG(DBG, FILTER, "Set outer S-VLAN hasher.");
+			/*
+			 * use whole 32-bit 802.1a tag - backward compatible
+			 * with VSWITCH implementation
+			 */
+			res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, words,
+				DYN_FIRST_VLAN, 0, 32, toeplitz);
+
+		} else {
+			NT_LOG(DBG, FILTER, "Set inner S-VLAN hasher.");
+			/*
+			 * use whole 32-bit 802.1a tag - backward compatible
+			 * with VSWITCH implementation
+			 */
+			res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, words, DYN_TUN_VLAN,
+				0, 32, toeplitz);
+		}
+
+		unset_bits_and_log(&fields, RTE_ETH_RSS_S_VLAN);
+	}
+	/* L2 payload */
+	/* calculate hash of 128-bits of l2 payload; Use MPLS protocol offset to address the
+	 * beginning of L2 payload even if MPLS header is not present
+	 */
+	if (fields & RTE_ETH_RSS_L2_PAYLOAD) {
+		uint64_t outer_fields_enabled = 0;
+
+		if (outer) {
+			NT_LOG(DBG, FILTER, "Set outer L2 payload hasher.");
+			res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, words, DYN_MPLS, 0,
+				128, toeplitz);
+
+		} else {
+			NT_LOG(DBG, FILTER, "Set inner L2 payload hasher.");
+			res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, words, DYN_TUN_MPLS,
+				0, 128, toeplitz);
+			outer_fields_enabled = fields & RTE_ETH_RSS_GTPU;
+		}
+
+		/*
+		 * L2 PAYLOAD hashing overrides all L3 & L4 RSS flags.
+		 * Thus we can clear all remaining (supported)
+		 * RSS flags...
+		 */
+		unset_bits_and_log(&fields, NT_ETH_RSS_OFFLOAD_MASK);
+		/*
+		 * ...but in case of INNER L2 PAYLOAD we must process
+		 * "always outer" GTPU field if enabled
+		 */
+		fields |= outer_fields_enabled;
+	}
+
+	/* L3 + L4 protocol number */
+	if (fields & RTE_ETH_RSS_IPV4_CHKSUM) {
+		/* only IPv4 checksum is supported by DPDK RTE_ETH_RSS_* types */
+		if (ipv6) {
+			NT_LOG(ERR, FILTER,
+				"RSS: IPv4 checksum requested with IPv6 header hashing!");
+			res = 1;
+
+		} else if (outer) {
+			NT_LOG(DBG, FILTER, "Set outer IPv4 checksum hasher.");
+			res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, words, DYN_L3, 10,
+				16, toeplitz);
+
+		} else {
+			NT_LOG(DBG, FILTER, "Set inner IPv4 checksum hasher.");
+			res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, words, DYN_TUN_L3,
+				10, 16, toeplitz);
+		}
+
+		/*
+		 * L3 checksum is made from whole L3 header, i.e. no need to process other
+		 * L3 hashing flags
+		 */
+		unset_bits_and_log(&fields, RTE_ETH_RSS_IPV4_CHKSUM | NT_ETH_RSS_IP_MASK);
+	}
+
+	if (fields & NT_ETH_RSS_IP_MASK) {
+		if (ipv6) {
+			if (outer) {
+				if (fields & RTE_ETH_RSS_L3_SRC_ONLY) {
+					NT_LOG(DBG, FILTER, "Set outer IPv6/IPv4 src hasher.");
+					res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx,
+						words,
+						DYN_FINAL_IP_DST,
+						-16, 128, toeplitz);
+
+				} else if (fields & RTE_ETH_RSS_L3_DST_ONLY) {
+					NT_LOG(DBG, FILTER, "Set outer IPv6/IPv4 dst hasher.");
+					res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx,
+						words,
+						DYN_FINAL_IP_DST, 0,
+						128, toeplitz);
+
+				} else {
+					NT_LOG(DBG, FILTER,
+						"Set outer IPv6/IPv4 src & dst hasher.");
+					res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx,
+						words,
+						DYN_FINAL_IP_DST,
+						-16, 128, toeplitz);
+					res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx,
+						words,
+						DYN_FINAL_IP_DST, 0,
+						128, toeplitz);
+				}
+
+			} else if (fields & RTE_ETH_RSS_L3_SRC_ONLY) {
+				NT_LOG(DBG, FILTER, "Set inner IPv6/IPv4 src hasher.");
+				res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, words,
+					DYN_TUN_FINAL_IP_DST, -16,
+					128, toeplitz);
+
+			} else if (fields & RTE_ETH_RSS_L3_DST_ONLY) {
+				NT_LOG(DBG, FILTER, "Set inner IPv6/IPv4 dst hasher.");
+				res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, words,
+					DYN_TUN_FINAL_IP_DST, 0,
+					128, toeplitz);
+
+			} else {
+				NT_LOG(DBG, FILTER, "Set inner IPv6/IPv4 src & dst hasher.");
+				res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, words,
+					DYN_TUN_FINAL_IP_DST, -16,
+					128, toeplitz);
+				res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, words,
+					DYN_TUN_FINAL_IP_DST, 0,
+					128, toeplitz);
+			}
+
+			/* check if fragment ID shall be part of hash */
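+			/* the IPv6 Fragment header Identification field is 32 bits */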
+			if (fields & (RTE_ETH_RSS_FRAG_IPV4 | RTE_ETH_RSS_FRAG_IPV6)) {
+				if (outer) {
+					NT_LOG(DBG, FILTER,
+						"Set outer IPv6/IPv4 fragment ID hasher.");
+					res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx,
+						words,
+						DYN_ID_IPV4_6, 0,
+						32, toeplitz);
+
+				} else {
+					NT_LOG(DBG, FILTER,
+						"Set inner IPv6/IPv4 fragment ID hasher.");
+					res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx,
+						words,
+						DYN_TUN_ID_IPV4_6,
+						0, 32, toeplitz);
+				}
+			}
+
+			res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_AUTO_IPV4_MASK, hsh_idx, 0,
+				1);
+
+		} else {
+			/* IPv4 */
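+			/* IPv4 source address at offset 12, destination at offset 16 */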
+			if (outer) {
+				if (fields & RTE_ETH_RSS_L3_SRC_ONLY) {
+					NT_LOG(DBG, FILTER, "Set outer IPv4 src only hasher.");
+					res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx,
+						words, DYN_L3, 12,
+						32, toeplitz);
+
+				} else if (fields & RTE_ETH_RSS_L3_DST_ONLY) {
+					NT_LOG(DBG, FILTER, "Set outer IPv4 dst only hasher.");
+					res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx,
+						words, DYN_L3, 16,
+						32, toeplitz);
+
+				} else {
+					NT_LOG(DBG, FILTER, "Set outer IPv4 src & dst hasher.");
+					res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx,
+						words, DYN_L3, 12,
+						64, toeplitz);
+				}
+
+			} else if (fields & RTE_ETH_RSS_L3_SRC_ONLY) {
+				NT_LOG(DBG, FILTER, "Set inner IPv4 src only hasher.");
+				res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, words,
+					DYN_TUN_L3, 12, 32,
+					toeplitz);
+
+			} else if (fields & RTE_ETH_RSS_L3_DST_ONLY) {
+				NT_LOG(DBG, FILTER, "Set inner IPv4 dst only hasher.");
+				res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, words,
+					DYN_TUN_L3, 16, 32,
+					toeplitz);
+
+			} else {
+				NT_LOG(DBG, FILTER, "Set inner IPv4 src & dst hasher.");
+				res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, words,
+					DYN_TUN_L3, 12, 64,
+					toeplitz);
+			}
+
+			/* check if fragment ID shall be part of hash */
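+			/* the IPv4 Identification field is 16 bits */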
+			if (fields & RTE_ETH_RSS_FRAG_IPV4) {
+				if (outer) {
+					NT_LOG(DBG, FILTER,
+						"Set outer IPv4 fragment ID hasher.");
+					res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx,
+						words,
+						DYN_ID_IPV4_6, 0,
+						16, toeplitz);
+
+				} else {
+					NT_LOG(DBG, FILTER,
+						"Set inner IPv4 fragment ID hasher.");
+					res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx,
+						words,
+						DYN_TUN_ID_IPV4_6,
+						0, 16, toeplitz);
+				}
+			}
+		}
+
+		/* check if L4 protocol type shall be part of hash */
+		if (l4_proto_mask)
+			l4_proto_hash = true;
+
+		unset_bits_and_log(&fields, NT_ETH_RSS_IP_MASK);
+	}
+
+	/* L4 */
+	if (fields & (RTE_ETH_RSS_PORT | RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY)) {
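+		/* TCP/UDP source port at L4 byte offset 0, destination port at offset 2 */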
+		if (outer) {
+			if (fields & RTE_ETH_RSS_L4_SRC_ONLY) {
+				NT_LOG(DBG, FILTER, "Set outer L4 src hasher.");
+				res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, words,
+					DYN_L4, 0, 16, toeplitz);
+
+			} else if (fields & RTE_ETH_RSS_L4_DST_ONLY) {
+				NT_LOG(DBG, FILTER, "Set outer L4 dst hasher.");
+				res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, words,
+					DYN_L4, 2, 16, toeplitz);
+
+			} else {
+				NT_LOG(DBG, FILTER, "Set outer L4 src & dst hasher.");
+				res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, words,
+					DYN_L4, 0, 32, toeplitz);
+			}
+
+		} else if (fields & RTE_ETH_RSS_L4_SRC_ONLY) {
+			NT_LOG(DBG, FILTER, "Set inner L4 src hasher.");
+			res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, words, DYN_TUN_L4, 0,
+				16, toeplitz);
+
+		} else if (fields & RTE_ETH_RSS_L4_DST_ONLY) {
+			NT_LOG(DBG, FILTER, "Set inner L4 dst hasher.");
+			res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, words, DYN_TUN_L4, 2,
+				16, toeplitz);
+
+		} else {
+			NT_LOG(DBG, FILTER, "Set inner L4 src & dst hasher.");
+			res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, words, DYN_TUN_L4, 0,
+				32, toeplitz);
+		}
+
+		l4_proto_hash = true;
+		unset_bits_and_log(&fields,
+			RTE_ETH_RSS_PORT | RTE_ETH_RSS_L4_SRC_ONLY |
+			RTE_ETH_RSS_L4_DST_ONLY);
+	}
+
+	/* IPv4 protocol / IPv6 next header fields */
+	if (l4_proto_hash) {
+		/* NOTE: HW_HSH_RCP_P_MASK is not supported for Toeplitz, thus one of SW0, SW4
+		 * or W8 must be used to hash on the `protocol` field of the IPv4 header or the
+		 * `next header` field of the IPv6 header.
+		 */
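+		/* IPv4 `protocol` is at byte offset 9, IPv6 `next header` at offset 6 */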
+		if (outer) {
+			NT_LOG(DBG, FILTER, "Set outer L4 protocol type / next header hasher.");
+
+			if (toeplitz) {
+				if (ipv6) {
+					res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx,
+						words, DYN_L3, 6, 8,
+						toeplitz);
+
+				} else {
+					res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx,
+						words, DYN_L3, 9, 8,
+						toeplitz);
+				}
+
+			} else {
+				res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_P_MASK, hsh_idx, 0,
+					1);
+				res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_TNL_P, hsh_idx, 0,
+					0);
+			}
+
+		} else {
+			NT_LOG(DBG, FILTER, "Set inner L4 protocol type / next header hasher.");
+
+			if (toeplitz) {
+				if (ipv6) {
+					res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx,
+						words, DYN_TUN_L3,
+						6, 8, toeplitz);
+
+				} else {
+					res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx,
+						words, DYN_TUN_L3,
+						9, 8, toeplitz);
+				}
+
+			} else {
+				res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_P_MASK, hsh_idx, 0,
+					1);
+				res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_TNL_P, hsh_idx, 0,
+					1);
+			}
+		}
+
+		l4_proto_hash = false;
+	}
+
+	/*
+	 * GTPU - for UPF use cases we always use the TEID from the outermost GTPU header,
+	 * even when the remaining fields are hashed from the inner headers
+	 */
+	if (fields & RTE_ETH_RSS_GTPU) {
+		NT_LOG(DBG, FILTER, "Set outer GTPU TEID hasher.");
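+		/* the 32-bit TEID sits at byte offset 4 of the GTP-U header */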
+		res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, words, DYN_L4_PAYLOAD, 4, 32,
+			toeplitz);
+		unset_bits_and_log(&fields, RTE_ETH_RSS_GTPU);
+	}
+
+	/* Checksums */
+	/* only UDP, TCP and SCTP checksums are supported */
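+	/* checksum field offsets within the L4 header: UDP 6, TCP 16, SCTP 8 */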
+	if (fields & RTE_ETH_RSS_L4_CHKSUM) {
+		switch (l4_proto_mask) {
+		case RTE_ETH_RSS_NONFRAG_IPV4_UDP:
+		case RTE_ETH_RSS_NONFRAG_IPV6_UDP:
+		case RTE_ETH_RSS_IPV6_UDP_EX:
+		case RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP:
+		case RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_IPV6_UDP_EX:
+		case RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_IPV6_UDP_EX:
+		case RTE_ETH_RSS_UDP_COMBINED:
+			if (outer) {
+				NT_LOG(DBG, FILTER, "Set outer UDP checksum hasher.");
+				res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, words,
+					DYN_L4, 6, 16, toeplitz);
+
+			} else {
+				NT_LOG(DBG, FILTER, "Set inner UDP checksum hasher.");
+				res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, words,
+					DYN_TUN_L4, 6, 16,
+					toeplitz);
+			}
+
+			unset_bits_and_log(&fields, RTE_ETH_RSS_L4_CHKSUM | l4_proto_mask);
+			break;
+
+		case RTE_ETH_RSS_NONFRAG_IPV4_TCP:
+		case RTE_ETH_RSS_NONFRAG_IPV6_TCP:
+		case RTE_ETH_RSS_IPV6_TCP_EX:
+		case RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV6_TCP:
+		case RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_IPV6_TCP_EX:
+		case RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_IPV6_TCP_EX:
+		case RTE_ETH_RSS_TCP_COMBINED:
+			if (outer) {
+				NT_LOG(DBG, FILTER, "Set outer TCP checksum hasher.");
+				res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, words,
+					DYN_L4, 16, 16, toeplitz);
+
+			} else {
+				NT_LOG(DBG, FILTER, "Set inner TCP checksum hasher.");
+				res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, words,
+					DYN_TUN_L4, 16, 16,
+					toeplitz);
+			}
+
+			unset_bits_and_log(&fields, RTE_ETH_RSS_L4_CHKSUM | l4_proto_mask);
+			break;
+
+		case RTE_ETH_RSS_NONFRAG_IPV4_SCTP:
+		case RTE_ETH_RSS_NONFRAG_IPV6_SCTP:
+		case RTE_ETH_RSS_NONFRAG_IPV4_SCTP | RTE_ETH_RSS_NONFRAG_IPV6_SCTP:
+			if (outer) {
+				NT_LOG(DBG, FILTER, "Set outer SCTP checksum hasher.");
+				res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, words,
+					DYN_L4, 8, 32, toeplitz);
+
+			} else {
+				NT_LOG(DBG, FILTER, "Set inner SCTP checksum hasher.");
+				res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, words,
+					DYN_TUN_L4, 8, 32,
+					toeplitz);
+			}
+
+			unset_bits_and_log(&fields, RTE_ETH_RSS_L4_CHKSUM | l4_proto_mask);
+			break;
+
+		case RTE_ETH_RSS_NONFRAG_IPV4_OTHER:
+		case RTE_ETH_RSS_NONFRAG_IPV6_OTHER:
+
+		/* none or unsupported protocol was chosen */
+		case 0:
+			NT_LOG(ERR, FILTER,
+				"L4 checksum hashing is supported only for UDP, TCP and SCTP protocols");
+			res = -1;
+			break;
+
+		/* multiple L4 protocols were selected */
+		default:
+			NT_LOG(ERR, FILTER,
+				"L4 checksum hashing can be enabled for only one of the UDP, TCP or SCTP protocols");
+			res = -1;
+			break;
+		}
+	}
+
+	if (fields || res != 0) {
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_PRESET_ALL, hsh_idx, 0, 0);
+
+		if (sprint_nt_rss_mask(rss_buffer, rss_buffer_len, " ", rss_conf.rss_hf) == 0) {
+			NT_LOG(ERR, FILTER,
+				"RSS configuration%s is not supported for hash func %s.",
+				rss_buffer,
+				toeplitz ? "Toeplitz" : "NTH10");
+
+		} else {
+			NT_LOG(ERR, FILTER,
+				"RSS configuration 0x%" PRIX64
+				" is not supported for hash func %s.",
+				rss_conf.rss_hf,
+				toeplitz ? "Toeplitz" : "NTH10");
+		}
+
+		return -1;
+	}
+
+	return res;
+}
+
+
 static const struct profile_inline_ops ops = {
 	/*
 	 * Management
@@ -2993,6 +3836,7 @@  static const struct profile_inline_ops ops = {
 	.flow_destroy_locked_profile_inline = flow_destroy_locked_profile_inline,
 	.flow_create_profile_inline = flow_create_profile_inline,
 	.flow_destroy_profile_inline = flow_destroy_profile_inline,
+	.flow_nic_set_hasher_fields_inline = flow_nic_set_hasher_fields_inline,
 };
 
 void profile_inline_init(void)
diff --git a/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_profile_inline.h b/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_profile_inline.h
index b87f8542ac..e623bb2352 100644
--- a/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_profile_inline.h
+++ b/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_profile_inline.h
@@ -38,4 +38,8 @@  int flow_destroy_profile_inline(struct flow_eth_dev *dev,
 	struct flow_handle *flow,
 	struct rte_flow_error *error);
 
+int flow_nic_set_hasher_fields_inline(struct flow_nic_dev *ndev,
+	int hsh_idx,
+	struct nt_eth_rss_conf rss_conf);
+
 #endif	/* _FLOW_API_PROFILE_INLINE_H_ */
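
For context, a minimal caller-side sketch of the new entry point declared above. It is
illustrative only: the helper name setup_rss_sketch(), the recipe index, the chosen
RTE_ETH_RSS_* combination and the assumption that RTE_ETH_HASH_FUNCTION_DEFAULT selects
the non-Toeplitz (NT) hash are not part of this patch; whether a given combination is
accepted is decided by the checks inside flow_nic_set_hasher_fields_inline().

#include "flow_api.h"
#include "flow_api_profile_inline.h"

/* Sketch: program HSH recipe 1 to hash outer IPv4 addresses and L4 ports.
 * rss_key is left zeroed for this sketch.
 */
static int setup_rss_sketch(struct flow_nic_dev *ndev)
{
	struct nt_eth_rss_conf rss_conf = {
		.rss_hf = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_PORT,
		.algorithm = RTE_ETH_HASH_FUNCTION_DEFAULT,
	};

	return flow_nic_set_hasher_fields_inline(ndev, 1 /* hsh_idx */, rss_conf);
}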
diff --git a/drivers/net/ntnic/ntnic_mod_reg.h b/drivers/net/ntnic/ntnic_mod_reg.h
index 149c549112..1069be2f85 100644
--- a/drivers/net/ntnic/ntnic_mod_reg.h
+++ b/drivers/net/ntnic/ntnic_mod_reg.h
@@ -252,6 +252,10 @@  struct profile_inline_ops {
 	int (*flow_destroy_profile_inline)(struct flow_eth_dev *dev,
 		struct flow_handle *flow,
 		struct rte_flow_error *error);
+
+	int (*flow_nic_set_hasher_fields_inline)(struct flow_nic_dev *ndev,
+		int hsh_idx,
+		struct nt_eth_rss_conf rss_conf);
 };
 
 void register_profile_inline_ops(const struct profile_inline_ops *ops);
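
A hedged dispatch sketch for the new ops hook follows. The accessor get_profile_inline_ops()
is assumed here as the usual counterpart to register_profile_inline_ops() and is not part of
this hunk; NT_LOG usage matches the driver's existing logging convention.

/* Sketch: reach the hasher configuration through the registered ops table
 * instead of calling the inline profile directly.
 */
static int nic_set_hasher_fields(struct flow_nic_dev *ndev, int hsh_idx,
	struct nt_eth_rss_conf rss_conf)
{
	const struct profile_inline_ops *ops = get_profile_inline_ops();

	if (ops == NULL || ops->flow_nic_set_hasher_fields_inline == NULL) {
		NT_LOG(ERR, FILTER, "profile_inline module is not registered");
		return -1;
	}

	return ops->flow_nic_set_hasher_fields_inline(ndev, hsh_idx, rss_conf);
}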