[2/3] net/octeontx2: add tm packet marking cb
diff mbox series

Message ID 20200417072254.11455-2-nithind1988@gmail.com
State New
Delegated to: Jerin Jacob
Headers show
Series
  • [1/3] mbuf: add Tx offloads for packet marking
Related show

Checks

Context Check Description
ci/Intel-compilation fail apply issues
ci/checkpatch warning coding style issues

Commit Message

Nithin Kumar D April 17, 2020, 7:22 a.m. UTC
From: Krzysztof Kanas <kkanas@marvell.com>

Add tm packet marking enable/disable callbacks for
VLAN DEI, IP DSCP and IP ECN marking to support
marking in VLAN, IPv4 and IPv6 packets.

Signed-off-by: Krzysztof Kanas <kkanas@marvell.com>
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---

Depends-on: series-9450

 drivers/net/octeontx2/otx2_ethdev.c |  12 +-
 drivers/net/octeontx2/otx2_ethdev.h |   5 +
 drivers/net/octeontx2/otx2_tm.c     | 472 +++++++++++++++++++++++++++++++++++-
 drivers/net/octeontx2/otx2_tm.h     |  31 +++
 drivers/net/octeontx2/otx2_tx.h     |   9 +-
 5 files changed, 514 insertions(+), 15 deletions(-)

Patch
diff mbox series

diff --git a/drivers/net/octeontx2/otx2_ethdev.c b/drivers/net/octeontx2/otx2_ethdev.c
index 3116e5c..1c7c465 100644
--- a/drivers/net/octeontx2/otx2_ethdev.c
+++ b/drivers/net/octeontx2/otx2_ethdev.c
@@ -655,8 +655,8 @@  nix_rx_offload_flags(struct rte_eth_dev *eth_dev)
 	return flags;
 }
 
-static uint16_t
-nix_tx_offload_flags(struct rte_eth_dev *eth_dev)
+uint16_t
+otx2_nix_tx_offload_flags(struct rte_eth_dev *eth_dev)
 {
 	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
 	uint64_t conf = dev->tx_offloads;
@@ -974,6 +974,10 @@  otx2_nix_form_default_desc(struct otx2_eth_txq *txq)
 			send_mem->alg = NIX_SENDMEMALG_SETTSTMP;
 			send_mem->addr = txq->dev->tstamp.tx_tstamp_iova;
 		}
+
+		/* Update mark format */
+		txq->markfmt_en = txq->dev->tm_markfmt_en;
+
 		sg = (union nix_send_sg_s *)&txq->cmd[4];
 	} else {
 		send_hdr = (struct nix_send_hdr_s *)&txq->cmd[0];
@@ -1663,7 +1667,7 @@  otx2_nix_configure(struct rte_eth_dev *eth_dev)
 	dev->rx_offloads = rxmode->offloads;
 	dev->tx_offloads = txmode->offloads;
 	dev->rx_offload_flags |= nix_rx_offload_flags(eth_dev);
-	dev->tx_offload_flags |= nix_tx_offload_flags(eth_dev);
+	dev->tx_offload_flags |= otx2_nix_tx_offload_flags(eth_dev);
 	dev->rss_info.rss_grps = NIX_RSS_GRPS;
 
 	nb_rxq = RTE_MAX(data->nb_rx_queues, 1);
@@ -1829,7 +1833,7 @@  otx2_nix_configure(struct rte_eth_dev *eth_dev)
 	nix_lf_free(dev);
 fail_offloads:
 	dev->rx_offload_flags &= ~nix_rx_offload_flags(eth_dev);
-	dev->tx_offload_flags &= ~nix_tx_offload_flags(eth_dev);
+	dev->tx_offload_flags &= ~otx2_nix_tx_offload_flags(eth_dev);
 fail_configure:
 	dev->configured = 0;
 	return rc;
diff --git a/drivers/net/octeontx2/otx2_ethdev.h b/drivers/net/octeontx2/otx2_ethdev.h
index 0fbf68b..b34842a 100644
--- a/drivers/net/octeontx2/otx2_ethdev.h
+++ b/drivers/net/octeontx2/otx2_ethdev.h
@@ -304,6 +304,9 @@  struct otx2_eth_dev {
 	uint16_t txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
 	/* Contiguous queues */
 	uint16_t txschq_contig_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
+	uint64_t tm_markfmt_en;
+	uint8_t tm_markfmt_null;
+	uint8_t tm_markfmt[NIX_TM_MARK_MAX][NIX_TM_MARK_COLOR_MAX];
 	uint16_t otx2_tm_root_lvl;
 	uint16_t link_cfg_lvl;
 	uint16_t tm_flags;
@@ -347,6 +350,7 @@  struct otx2_eth_txq {
 	rte_iova_t fc_iova;
 	uint16_t sqes_per_sqb_log2;
 	int16_t nb_sqb_bufs_adj;
+	uint64_t markfmt_en;
 	RTE_MARKER slow_path_start;
 	uint16_t nb_sqb_bufs;
 	uint16_t sq;
@@ -570,6 +574,7 @@  int otx2_ethdev_parse_devargs(struct rte_devargs *devargs,
 void otx2_eth_set_rx_function(struct rte_eth_dev *eth_dev);
 void otx2_eth_set_tx_function(struct rte_eth_dev *eth_dev);
 void otx2_nix_form_default_desc(struct otx2_eth_txq *txq);
+uint16_t otx2_nix_tx_offload_flags(struct rte_eth_dev *eth_dev);
 
 /* Timesync - PTP routines */
 int otx2_nix_timesync_enable(struct rte_eth_dev *eth_dev);
diff --git a/drivers/net/octeontx2/otx2_tm.c b/drivers/net/octeontx2/otx2_tm.c
index e94a276..f11b828 100644
--- a/drivers/net/octeontx2/otx2_tm.c
+++ b/drivers/net/octeontx2/otx2_tm.c
@@ -20,6 +20,30 @@  enum otx2_tm_node_level {
 	OTX2_TM_LVL_MAX,
 };
 
+static const uint8_t y_mask_val[NIX_TM_MARK_MAX][2] = {
+	[NIX_TM_MARK_VLAN_DEI] = { 0x0, 0x8 },
+	[NIX_TM_MARK_IPV4_DSCP] = { 0x1, 0x2 },
+	[NIX_TM_MARK_IPV4_ECN] = { 0x0, 0xc },
+	[NIX_TM_MARK_IPV6_DSCP] = { 0x1, 0x2 },
+	[NIX_TM_MARK_IPV6_ECN] = { 0x0, 0x3 },
+};
+
+static const uint8_t r_mask_val[NIX_TM_MARK_MAX][2] = {
+	[NIX_TM_MARK_VLAN_DEI] = { 0x0, 0x8 },
+	[NIX_TM_MARK_IPV4_DSCP] = { 0x0, 0x3 },
+	[NIX_TM_MARK_IPV4_ECN] = { 0x0, 0xc },
+	[NIX_TM_MARK_IPV6_DSCP] = { 0x0, 0x3 },
+	[NIX_TM_MARK_IPV6_ECN] = { 0x0, 0x3 },
+};
+
+static const uint8_t mark_off[NIX_TM_MARK_MAX] = {
+	[NIX_TM_MARK_VLAN_DEI] = 0x3,  /* Byte 14 Bit[4:1] */
+	[NIX_TM_MARK_IPV4_DSCP] = 0x1, /* Byte 1 Bit[6:3] */
+	[NIX_TM_MARK_IPV4_ECN] = 0x6,  /* Byte 1 Bit[1:0], Byte 2 Bit[7:6]  */
+	[NIX_TM_MARK_IPV6_DSCP] = 0x5, /* Byte 0 Bit[2:0], Byte 1 Bit[7] */
+	[NIX_TM_MARK_IPV6_ECN] = 0x0,  /* Byte 1 Bit[7:4] */
+};
+
 static inline
 uint64_t shaper2regval(struct shaper_params *shaper)
 {
@@ -375,10 +399,10 @@  prepare_tm_shaper_reg(struct otx2_nix_tm_node *tm_node,
 
 	otx2_tm_dbg("Shaper config node %s(%u) lvl %u id %u, pir %" PRIu64
 		    "(%" PRIu64 "B), cir %" PRIu64 "(%" PRIu64 "B)"
-		    "adjust 0x%" PRIx64 "(pktmode %u) (%p)",
+		    "adjust 0x%" PRIx64 "(pktmode %u) red_alg %x (%p)",
 		    nix_hwlvl2str(tm_node->hw_lvl), schq, tm_node->lvl,
 		    tm_node->id, pir.rate, pir.burst, cir.rate, cir.burst,
-		    adjust, tm_node->pkt_mode, tm_node);
+		    adjust, tm_node->pkt_mode, tm_node->red_algo, tm_node);
 
 	switch (tm_node->hw_lvl) {
 	case NIX_TXSCH_LVL_SMQ:
@@ -478,6 +502,46 @@  prepare_tm_shaper_reg(struct otx2_nix_tm_node *tm_node,
 }
 
 static uint8_t
+prepare_tm_shaper_red_algo(struct otx2_nix_tm_node *tm_node,
+			   volatile uint64_t *reg, volatile uint64_t *regval,
+			   volatile uint64_t *regval_mask)
+{
+	uint32_t schq = tm_node->hw_id;
+	uint8_t k = 0;
+
+	otx2_tm_dbg("Shaper read alg node %s(%u) lvl %u id %u, red_alg %x (%p)",
+		     nix_hwlvl2str(tm_node->hw_lvl), schq, tm_node->lvl,
+		     tm_node->id, tm_node->red_algo, tm_node);
+
+	/* Configure just RED algo */
+	regval[k] = ((uint64_t)tm_node->red_algo << 9);
+	regval_mask[k] = ~(BIT_ULL(10) | BIT_ULL(9));
+
+	switch (tm_node->hw_lvl) {
+	case NIX_TXSCH_LVL_SMQ:
+		reg[k] = NIX_AF_MDQX_SHAPE(schq);
+		k++;
+		break;
+	case NIX_TXSCH_LVL_TL4:
+		reg[k] = NIX_AF_TL4X_SHAPE(schq);
+		k++;
+		break;
+	case NIX_TXSCH_LVL_TL3:
+		reg[k] = NIX_AF_TL3X_SHAPE(schq);
+		k++;
+		break;
+	case NIX_TXSCH_LVL_TL2:
+		reg[k] = NIX_AF_TL2X_SHAPE(schq);
+		k++;
+		break;
+	default:
+		break;
+	}
+
+	return k;
+}
+
+static uint8_t
 prepare_tm_sw_xoff(struct otx2_nix_tm_node *tm_node, bool enable,
 		   volatile uint64_t *reg, volatile uint64_t *regval)
 {
@@ -1939,10 +2003,16 @@  otx2_nix_tm_capa_get(struct rte_eth_dev *eth_dev,
 		RTE_TM_STATS_N_PKTS_RED_DROPPED |
 		RTE_TM_STATS_N_BYTES_RED_DROPPED;
 
-	for (i = 0; i < RTE_COLORS; i++) {
-		cap->mark_vlan_dei_supported[i] = false;
-		cap->mark_ip_ecn_tcp_supported[i] = false;
-		cap->mark_ip_dscp_supported[i] = false;
+	cap->mark_vlan_dei_supported[RTE_COLOR_GREEN] = false;
+	cap->mark_ip_ecn_tcp_supported[RTE_COLOR_GREEN] = false;
+	cap->mark_ip_ecn_sctp_supported[RTE_COLOR_GREEN] = false;
+	cap->mark_ip_dscp_supported[RTE_COLOR_GREEN] = false;
+
+	for (i = RTE_COLOR_YELLOW; i < RTE_COLORS; i++) {
+		cap->mark_vlan_dei_supported[i] = true;
+		cap->mark_ip_ecn_tcp_supported[i] = true;
+		cap->mark_ip_ecn_sctp_supported[i] = true;
+		cap->mark_ip_dscp_supported[i] = true;
 	}
 
 	return 0;
@@ -2831,6 +2901,317 @@  otx2_nix_tm_node_stats_read(struct rte_eth_dev *eth_dev, uint32_t node_id,
 	return rc;
 }
 
+static void
+nix_tm_mark_queue_reconfig(struct rte_eth_dev *eth_dev, bool enable)
+{
+	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+	struct otx2_eth_txq *txq;
+	int i, nb_txq;
+
+	nb_txq = RTE_MIN(dev->configured_nb_tx_qs, eth_dev->data->nb_tx_queues);
+
+	if (enable) {
+		dev->tx_offload_flags |= NIX_TX_OFFLOAD_MARK_F;
+		/* Enable L3 and OL3 CKSUM for marking IP DSCP and ECN */
+		if (dev->tm_flags & NIX_TM_MARK_IP_ECN_EN ||
+		    dev->tm_flags & NIX_TM_MARK_IP_DSCP_EN)
+			dev->tx_offload_flags |=
+				(NIX_TX_OFFLOAD_L3_L4_CSUM_F |
+				 NIX_TX_OFFLOAD_OL3_OL4_CSUM_F);
+		otx2_tm_dbg("Enabling TM marking with tx_offload_flags=0x%04x"
+			    " markfmt_en=0x%" PRIx64, dev->tx_offload_flags,
+			    dev->tm_markfmt_en);
+	} else {
+		/* Reset to old flags excluding MARK */
+		dev->tx_offload_flags = otx2_nix_tx_offload_flags(eth_dev);
+		otx2_tm_dbg("Disabling TM marking with tx_offload_flags=0x%04x"
+			    " markfmt_en=0x%" PRIx64, dev->tx_offload_flags,
+			    dev->tm_markfmt_en);
+	}
+
+	for (i = 0; i < nb_txq; i++) {
+		txq = eth_dev->data->tx_queues[i];
+		otx2_nix_form_default_desc(txq);
+	}
+	rte_wmb();
+}
+
+/* Only called while device is stopped */
+static int
+nix_tm_update_red_algo(struct otx2_eth_dev *dev, bool red_send,
+		       struct rte_tm_error *error)
+{
+	struct otx2_mbox *mbox = dev->mbox;
+	struct otx2_nix_tm_node *tm_node;
+	struct nix_txschq_config *req;
+	uint8_t k;
+	int rc;
+
+	TAILQ_FOREACH(tm_node, &dev->node_list, node) {
+		/* Skip leaf nodes */
+		if (nix_tm_is_leaf(dev, tm_node->lvl))
+			continue;
+
+		if (tm_node->hw_lvl == NIX_TXSCH_LVL_TL1)
+			continue;
+
+		/* Skip if no update of red_algo is needed */
+		if ((red_send && (tm_node->red_algo == NIX_REDALG_SEND)) ||
+		    (!red_send && (tm_node->red_algo != NIX_REDALG_SEND)))
+			continue;
+
+		/* Update Red algo */
+		if (red_send)
+			tm_node->red_algo = NIX_REDALG_SEND;
+		else if (tm_node->flags & NIX_TM_NODE_RED_DISCARD)
+			tm_node->red_algo = NIX_REDALG_DISCARD;
+		else
+			tm_node->red_algo = NIX_REDALG_STD;
+
+		/* Update txschq config  */
+		req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
+		req->lvl = tm_node->hw_lvl;
+		k = prepare_tm_shaper_red_algo(tm_node, req->reg,
+					       req->regval, req->regval_mask);
+		req->num_regs = k;
+
+		rc = send_tm_reqval(mbox, req, error);
+		if (rc)
+			return rc;
+	}
+	return 0;
+}
+
+/* Returns true if queue reconfig is needed */
+static bool
+nix_tm_update_markfmt(struct otx2_eth_dev *dev,
+		      enum otx2_nix_tm_mark type,
+		      int mark_yellow, int mark_red)
+{
+	uint64_t new_markfmt, old_markfmt;
+	uint8_t *tm_markfmt;
+	uint8_t en_shift;
+	uint64_t mask;
+
+	if (type >= NIX_TM_MARK_MAX)
+		return 0;
+
+	/* Pre-allocated mark formats for type:color combinations */
+	tm_markfmt = dev->tm_markfmt[type];
+
+	if (!mark_yellow && !mark_red) {
+		/* Null format to disable */
+		new_markfmt = dev->tm_markfmt_null;
+	} else {
+		/* Marking enabled with combination of yellow and red */
+		if (mark_yellow && mark_red)
+			new_markfmt = tm_markfmt[NIX_TM_MARK_COLOR_Y_R];
+		else if (mark_yellow)
+			new_markfmt = tm_markfmt[NIX_TM_MARK_COLOR_Y];
+		else
+			new_markfmt = tm_markfmt[NIX_TM_MARK_COLOR_R];
+	}
+
+	mask = 0xFFull;
+	/* Format of fast path markfmt
+	 * ipv6_ecn[8]:ipv4_ecn[8]:ipv6_dscp[8]:ipv4_dscp[8]:vlan_dei[16]
+	 * fmt[7] = ptr offset for IPv4/IPv6 on l2_len.
+	 * fmt[6:0] = markfmt idx.
+	 */
+	switch (type) {
+	case NIX_TM_MARK_VLAN_DEI:
+		en_shift = NIX_TM_MARK_VLAN_DEI_SHIFT;
+		mask = 0xFFFFull;
+		new_markfmt |= new_markfmt << 8;
+		break;
+	case NIX_TM_MARK_IPV4_DSCP:
+		new_markfmt |= BIT_ULL(7);
+		en_shift = NIX_TM_MARK_IPV4_DSCP_SHIFT;
+		break;
+	case NIX_TM_MARK_IPV4_ECN:
+		new_markfmt |= BIT_ULL(7);
+		en_shift = NIX_TM_MARK_IPV4_ECN_SHIFT;
+		break;
+	case NIX_TM_MARK_IPV6_DSCP:
+		en_shift = NIX_TM_MARK_IPV6_DSCP_SHIFT;
+		break;
+	case NIX_TM_MARK_IPV6_ECN:
+		new_markfmt |= BIT_ULL(7);
+		en_shift = NIX_TM_MARK_IPV6_ECN_SHIFT;
+		break;
+	default:
+		return 0;
+	}
+
+	/* Skip if same as old config */
+	old_markfmt = (dev->tm_markfmt_en >> en_shift) & mask;
+	if (old_markfmt == new_markfmt)
+		return false;
+
+	/* Need queue reconfig */
+	dev->tm_markfmt_en &= ~(mask << en_shift);
+	dev->tm_markfmt_en |= (new_markfmt << en_shift);
+	return true;
+}
+
+static int
+otx2_nix_tm_mark_vlan_dei(struct rte_eth_dev *eth_dev, int mark_green,
+			  int mark_yellow, int mark_red,
+			  struct rte_tm_error *error)
+{
+	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+	bool need_reconfig, marking;
+	int rc;
+
+	if (mark_green) {
+		error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
+		error->message = "Green VLAN marking not supported";
+		return -EINVAL;
+	}
+
+	if (eth_dev->data->dev_started) {
+		error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
+		error->message = "VLAN DEI mark for running ports not "
+				 "supported";
+		return -EBUSY;
+	}
+
+	if (!(dev->tm_flags & NIX_TM_COMMITTED)) {
+		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+		error->message = "hierarchy doesn't exist";
+		return -EINVAL;
+	}
+
+	otx2_tm_dbg("VLAN DEI marking update yellow=%u red=%u",
+		    mark_yellow, mark_red);
+
+	need_reconfig = nix_tm_update_markfmt(dev, NIX_TM_MARK_VLAN_DEI,
+					      mark_yellow, mark_red);
+	if (!need_reconfig)
+		return 0;
+
+	if (!mark_yellow && !mark_red)
+		dev->tm_flags &= ~NIX_TM_MARK_VLAN_DEI_EN;
+	else
+		dev->tm_flags |= NIX_TM_MARK_VLAN_DEI_EN;
+
+	/* Update red algo for change in mark_red */
+	rc = nix_tm_update_red_algo(dev, !!mark_red, error);
+	if (rc)
+		return rc;
+
+	/* Update red algo */
+	marking = !!(dev->tm_flags & NIX_TM_MARK_EN_MASK);
+	nix_tm_mark_queue_reconfig(eth_dev, marking);
+	return 0;
+}
+
+static int
+otx2_nix_tm_mark_ip_ecn(struct rte_eth_dev *eth_dev, int mark_green,
+			int mark_yellow, int mark_red,
+			struct rte_tm_error *error)
+{
+	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+	bool need_reconfig, marking;
+	int rc;
+
+	if (mark_green) {
+		error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
+		error->message = "Green IP ECN marking not supported";
+		return -EINVAL;
+	}
+
+	if (eth_dev->data->dev_started) {
+		error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
+		error->message = "IP ECN mark for running ports not "
+				 "supported";
+		return -EBUSY;
+	}
+
+	if (!(dev->tm_flags & NIX_TM_COMMITTED)) {
+		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+		error->message = "hierarchy doesn't exist";
+		return -EINVAL;
+	}
+
+	otx2_tm_dbg("IP ECN marking update yellow=%u red=%u",
+		    mark_yellow, mark_red);
+
+	need_reconfig = nix_tm_update_markfmt(dev, NIX_TM_MARK_IPV4_ECN,
+					      mark_yellow, mark_red);
+	need_reconfig |= nix_tm_update_markfmt(dev, NIX_TM_MARK_IPV6_ECN,
+					       mark_yellow, mark_red);
+	if (!need_reconfig)
+		return 0;
+
+	if (!mark_yellow && !mark_red)
+		dev->tm_flags &= ~NIX_TM_MARK_IP_ECN_EN;
+	else
+		dev->tm_flags |= NIX_TM_MARK_IP_ECN_EN;
+
+	/* Update red algo for change in mark_red */
+	rc = nix_tm_update_red_algo(dev, !!mark_red, error);
+	if (rc)
+		return rc;
+
+	marking = !!(dev->tm_flags & NIX_TM_MARK_EN_MASK);
+	nix_tm_mark_queue_reconfig(eth_dev, marking);
+	return 0;
+}
+
+static int
+otx2_nix_tm_mark_ip_dscp(struct rte_eth_dev *eth_dev, int mark_green,
+			 int mark_yellow, int mark_red,
+			 struct rte_tm_error *error)
+{
+	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+	bool need_reconfig, marking;
+	int rc;
+
+	if (mark_green) {
+		error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
+		error->message = "Green IP DSCP marking not supported";
+		return -EINVAL;
+	}
+
+	if (eth_dev->data->dev_started) {
+		error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
+		error->message = "IP DSCP mark for running ports not "
+				 "supported";
+		return -EBUSY;
+	}
+
+	if (!(dev->tm_flags & NIX_TM_COMMITTED)) {
+		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+		error->message = "hierarchy doesn't exist";
+		return -EINVAL;
+	}
+
+	otx2_tm_dbg("IP DSCP marking update yellow=%u red=%u",
+		    mark_yellow, mark_red);
+
+	need_reconfig = nix_tm_update_markfmt(dev, NIX_TM_MARK_IPV4_DSCP,
+					      mark_yellow, mark_red);
+	need_reconfig |= nix_tm_update_markfmt(dev, NIX_TM_MARK_IPV6_DSCP,
+					       mark_yellow, mark_red);
+	if (!need_reconfig)
+		return 0;
+
+	if (!mark_yellow && !mark_red)
+		dev->tm_flags &= ~NIX_TM_MARK_IP_DSCP_EN;
+	else
+		dev->tm_flags |= NIX_TM_MARK_IP_DSCP_EN;
+
+	/* Update red algo for change in mark_red */
+	rc = nix_tm_update_red_algo(dev, !!mark_red, error);
+	if (rc)
+		return rc;
+
+	marking = !!(dev->tm_flags & NIX_TM_MARK_EN_MASK);
+	nix_tm_mark_queue_reconfig(eth_dev, marking);
+	return 0;
+}
+
 const struct rte_tm_ops otx2_tm_ops = {
 	.node_type_get = otx2_nix_tm_node_type_get,
 
@@ -2850,9 +3231,80 @@  const struct rte_tm_ops otx2_tm_ops = {
 	.node_shaper_update = otx2_nix_tm_node_shaper_update,
 	.node_parent_update = otx2_nix_tm_node_parent_update,
 	.node_stats_read = otx2_nix_tm_node_stats_read,
+
+	.mark_vlan_dei = otx2_nix_tm_mark_vlan_dei,
+	.mark_ip_ecn = otx2_nix_tm_mark_ip_ecn,
+	.mark_ip_dscp = otx2_nix_tm_mark_ip_dscp,
 };
 
 static int
+nix_tm_mark_init(struct otx2_eth_dev *dev)
+{
+	struct nix_mark_format_cfg_rsp *rsp;
+	struct otx2_mbox *mbox = dev->mbox;
+	struct nix_mark_format_cfg *req;
+	int rc, i, j;
+
+	/* Check for supported revisions */
+	if (otx2_dev_is_95xx_Ax(dev) ||
+	    otx2_dev_is_96xx_Ax(dev))
+		return 0;
+
+	/* Null mark format */
+	req = otx2_mbox_alloc_msg_nix_mark_format_cfg(mbox);
+	rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+	if (rc) {
+		otx2_err("TM failed to alloc null mark format, rc=%d", rc);
+		return rc;
+	}
+
+	dev->tm_markfmt_null = rsp->mark_format_idx;
+
+	/* Alloc vlan, dscp, ecn mark formats */
+	for (i = 0; i < NIX_TM_MARK_MAX; i++) {
+		for (j = 0; j < NIX_TM_MARK_COLOR_MAX; j++) {
+			req = otx2_mbox_alloc_msg_nix_mark_format_cfg(mbox);
+			req->offset = mark_off[i];
+
+			switch (j) {
+			case NIX_TM_MARK_COLOR_Y:
+				req->y_mask = y_mask_val[i][0];
+				req->y_val = y_mask_val[i][1];
+				break;
+			case NIX_TM_MARK_COLOR_R:
+				req->r_mask = r_mask_val[i][0];
+				req->r_val = r_mask_val[i][1];
+				break;
+			case NIX_TM_MARK_COLOR_Y_R:
+				req->y_mask = y_mask_val[i][0];
+				req->y_val = y_mask_val[i][1];
+				req->r_mask = r_mask_val[i][0];
+				req->r_val = r_mask_val[i][1];
+				break;
+			}
+
+			rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+			if (rc) {
+				otx2_err("TM failed to alloc mark fmt "
+					 "type %u color %u, rc=%d", i, j, rc);
+				return rc;
+			}
+
+			dev->tm_markfmt[i][j] = rsp->mark_format_idx;
+			otx2_tm_dbg("Mark type: %u, Mark Color:%u, id:%u\n",
+				    i, j, dev->tm_markfmt[i][j]);
+		}
+	}
+	/* Update null mark format as default */
+	nix_tm_update_markfmt(dev, NIX_TM_MARK_VLAN_DEI, 0, 0);
+	nix_tm_update_markfmt(dev, NIX_TM_MARK_IPV4_DSCP, 0, 0);
+	nix_tm_update_markfmt(dev, NIX_TM_MARK_IPV4_ECN, 0, 0);
+	nix_tm_update_markfmt(dev, NIX_TM_MARK_IPV6_DSCP, 0, 0);
+	nix_tm_update_markfmt(dev, NIX_TM_MARK_IPV6_ECN, 0, 0);
+	return 0;
+}
+
+static int
 nix_tm_prepare_default_tree(struct rte_eth_dev *eth_dev)
 {
 	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
@@ -2963,7 +3415,7 @@  void otx2_nix_tm_conf_init(struct rte_eth_dev *eth_dev)
 int otx2_nix_tm_init_default(struct rte_eth_dev *eth_dev)
 {
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
-	struct otx2_eth_dev  *dev = otx2_eth_pmd_priv(eth_dev);
+	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
 	uint16_t sq_cnt = eth_dev->data->nb_tx_queues;
 	int rc;
 
@@ -2985,6 +3437,10 @@  int otx2_nix_tm_init_default(struct rte_eth_dev *eth_dev)
 	if (pci_dev->max_vfs)
 		dev->tm_flags |= NIX_TM_TL1_NO_SP;
 
+	rc = nix_tm_mark_init(dev);
+	if (rc != 0)
+		return rc;
+
 	rc = nix_tm_prepare_default_tree(eth_dev);
 	if (rc != 0)
 		return rc;
@@ -2992,8 +3448,8 @@  int otx2_nix_tm_init_default(struct rte_eth_dev *eth_dev)
 	rc = nix_tm_alloc_resources(eth_dev, false);
 	if (rc != 0)
 		return rc;
+
 	dev->tm_leaf_cnt = sq_cnt;
-
 	return 0;
 }
 
diff --git a/drivers/net/octeontx2/otx2_tm.h b/drivers/net/octeontx2/otx2_tm.h
index db44d48..afc796a 100644
--- a/drivers/net/octeontx2/otx2_tm.h
+++ b/drivers/net/octeontx2/otx2_tm.h
@@ -13,6 +13,37 @@ 
 #define NIX_TM_COMMITTED	BIT_ULL(1)
 #define NIX_TM_RATE_LIMIT_TREE	BIT_ULL(2)
 #define NIX_TM_TL1_NO_SP	BIT_ULL(3)
+#define NIX_TM_MARK_IP_DSCP_EN	BIT_ULL(4)
+#define NIX_TM_MARK_IP_ECN_EN	BIT_ULL(5)
+#define NIX_TM_MARK_VLAN_DEI_EN	BIT_ULL(6)
+
+
+#define NIX_TM_MARK_EN_MASK	(NIX_TM_MARK_IP_DSCP_EN | \
+				 NIX_TM_MARK_IP_ECN_EN |  \
+				 NIX_TM_MARK_VLAN_DEI_EN)
+
+/* Shift positions for struct otx2_eth_dev::tm_markfmt_en field */
+#define NIX_TM_MARK_VLAN_DEI_SHIFT	0 /* Leave 16b for VLAN for FP logic */
+#define NIX_TM_MARK_IPV4_DSCP_SHIFT	16
+#define NIX_TM_MARK_IPV6_DSCP_SHIFT	24
+#define NIX_TM_MARK_IPV4_ECN_SHIFT	32
+#define NIX_TM_MARK_IPV6_ECN_SHIFT	40
+
+enum otx2_nix_tm_mark {
+	NIX_TM_MARK_VLAN_DEI,
+	NIX_TM_MARK_IPV4_DSCP,
+	NIX_TM_MARK_IPV4_ECN,
+	NIX_TM_MARK_IPV6_DSCP,
+	NIX_TM_MARK_IPV6_ECN,
+	NIX_TM_MARK_MAX
+};
+
+enum otx2_nix_tm_mark_color {
+	NIX_TM_MARK_COLOR_Y,
+	NIX_TM_MARK_COLOR_R,
+	NIX_TM_MARK_COLOR_Y_R,
+	NIX_TM_MARK_COLOR_MAX
+};
 
 struct otx2_eth_dev;
 
diff --git a/drivers/net/octeontx2/otx2_tx.h b/drivers/net/octeontx2/otx2_tx.h
index 3c43170..928e4ea 100644
--- a/drivers/net/octeontx2/otx2_tx.h
+++ b/drivers/net/octeontx2/otx2_tx.h
@@ -13,6 +13,7 @@ 
 #define NIX_TX_OFFLOAD_TSTAMP_F		BIT(4)
 #define NIX_TX_OFFLOAD_TSO_F		BIT(5)
 #define NIX_TX_OFFLOAD_SECURITY_F	BIT(6)
+#define NIX_TX_OFFLOAD_MARK_F		BIT(7)
 
 /* Flags to control xmit_prepare function.
  * Defining it from backwards to denote its been
@@ -22,11 +23,12 @@ 
 
 #define NIX_TX_NEED_SEND_HDR_W1	\
 	(NIX_TX_OFFLOAD_L3_L4_CSUM_F | NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |	\
-	 NIX_TX_OFFLOAD_VLAN_QINQ_F | NIX_TX_OFFLOAD_TSO_F)
+	 NIX_TX_OFFLOAD_VLAN_QINQ_F | NIX_TX_OFFLOAD_TSO_F | \
+	 NIX_TX_OFFLOAD_MARK_F)
 
 #define NIX_TX_NEED_EXT_HDR \
 	(NIX_TX_OFFLOAD_VLAN_QINQ_F | NIX_TX_OFFLOAD_TSTAMP_F | \
-	 NIX_TX_OFFLOAD_TSO_F)
+	 NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_MARK_F)
 
 #define NIX_UDP_TUN_BITMASK \
 	((1ull << (PKT_TX_TUNNEL_VXLAN >> 45)) | \
@@ -42,7 +44,8 @@  static __rte_always_inline int
 otx2_nix_tx_ext_subs(const uint16_t flags)
 {
 	return (flags & NIX_TX_OFFLOAD_TSTAMP_F) ? 2 :
-		((flags & (NIX_TX_OFFLOAD_VLAN_QINQ_F | NIX_TX_OFFLOAD_TSO_F)) ?
+		((flags & (NIX_TX_OFFLOAD_VLAN_QINQ_F | NIX_TX_OFFLOAD_TSO_F |
+			   NIX_TX_OFFLOAD_MARK_F)) ?
 		 1 : 0);
 }