@@ -1310,6 +1310,39 @@ mlx5_max_lro_msg_size_adjust(struct rte_eth_dev *dev, uint16_t idx,
priv->max_lro_msg_size * MLX5_LRO_SEG_CHUNK_SIZE);
}
+/**
+ * Look up the dynamic mbuf field offset and flag for Rx timestamp.
+ * Does nothing when the HW timestamp offload is disabled on the queue.
+ * @param rxq_data
+ *   Datapath struct where the field offset and flag mask are stored.
+ * @return
+ *   0 on success or when the offload is disabled,
+ *   negative errno value otherwise (lookup failed).
+ */
+static int
+mlx5_rx_timestamp_setup(struct mlx5_rxq_data *rxq_data)
+{
+	int timestamp_rx_dynflag_offset;
+
+	rxq_data->timestamp_rx_flag = 0;
+	if (rxq_data->hw_timestamp == 0)
+		return 0;
+	rxq_data->timestamp_offset = rte_mbuf_dynfield_lookup(
+			RTE_MBUF_DYNFIELD_TIMESTAMP_NAME, NULL);
+	if (rxq_data->timestamp_offset < 0) {
+		DRV_LOG(ERR, "Cannot lookup timestamp field");
+		return -rte_errno;
+	}
+	timestamp_rx_dynflag_offset = rte_mbuf_dynflag_lookup(
+			RTE_MBUF_DYNFLAG_RX_TIMESTAMP_NAME, NULL);
+	if (timestamp_rx_dynflag_offset < 0) {
+		DRV_LOG(ERR, "Cannot lookup Rx timestamp flag");
+		return -rte_errno;
+	}
+	rxq_data->timestamp_rx_flag = RTE_BIT64(timestamp_rx_dynflag_offset);
+	return 0;
+}
+
/**
* Create a DPDK Rx queue.
*
@@ -1492,7 +1525,10 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
mlx5_max_lro_msg_size_adjust(dev, idx, max_lro_size);
/* Toggle RX checksum offload if hardware supports it. */
tmpl->rxq.csum = !!(offloads & DEV_RX_OFFLOAD_CHECKSUM);
+ /* Configure Rx timestamp. */
tmpl->rxq.hw_timestamp = !!(offloads & DEV_RX_OFFLOAD_TIMESTAMP);
+ if (mlx5_rx_timestamp_setup(&tmpl->rxq) != 0)
+ goto error;
/* Configure VLAN stripping. */
tmpl->rxq.vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
/* By default, FCS (CRC) is stripped by hardware. */
@@ -1287,8 +1287,8 @@ rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
if (rxq->rt_timestamp)
ts = mlx5_txpp_convert_rx_ts(rxq->sh, ts);
- pkt->timestamp = ts;
- pkt->ol_flags |= PKT_RX_TIMESTAMP;
+ mlx5_timestamp_set(pkt, rxq->timestamp_offset, ts);
+ pkt->ol_flags |= rxq->timestamp_rx_flag;
}
}
@@ -151,6 +151,8 @@ struct mlx5_rxq_data {
/* CQ (UAR) access lock required for 32bit implementations */
#endif
uint32_t tunnel; /* Tunnel information. */
+ int timestamp_offset; /* Dynamic mbuf field for timestamp. */
+ uint64_t timestamp_rx_flag; /* Dynamic mbuf flag for timestamp. */
uint64_t flow_meta_mask;
int32_t flow_meta_offset;
} __rte_cache_aligned;
@@ -681,4 +683,21 @@ mlx5_txpp_convert_tx_ts(struct mlx5_dev_ctx_shared *sh, uint64_t mts)
return ci;
}
+/**
+ * Store a timestamp into the mbuf dynamic timestamp field.
+ * No validation: offset must come from rte_mbuf_dynfield_lookup().
+ * @param mbuf
+ *   Mbuf to write into (must be non-NULL).
+ * @param offset
+ *   Byte offset of the dynamic timestamp field within the mbuf.
+ * @param timestamp
+ *   Timestamp value to store.
+ */
+static __rte_always_inline void
+mlx5_timestamp_set(struct rte_mbuf *mbuf, int offset,
+		rte_mbuf_timestamp_t timestamp)
+{
+	*RTE_MBUF_DYNFIELD(mbuf, offset, rte_mbuf_timestamp_t *) = timestamp;
+}
+
#endif /* RTE_PMD_MLX5_RXTX_H_ */
@@ -330,13 +330,13 @@ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq,
vector unsigned char ol_flags = (vector unsigned char)
(vector unsigned int){
rxq->rss_hash * PKT_RX_RSS_HASH |
- rxq->hw_timestamp * PKT_RX_TIMESTAMP,
+ rxq->hw_timestamp * rxq->timestamp_rx_flag,
rxq->rss_hash * PKT_RX_RSS_HASH |
- rxq->hw_timestamp * PKT_RX_TIMESTAMP,
+ rxq->hw_timestamp * rxq->timestamp_rx_flag,
rxq->rss_hash * PKT_RX_RSS_HASH |
- rxq->hw_timestamp * PKT_RX_TIMESTAMP,
+ rxq->hw_timestamp * rxq->timestamp_rx_flag,
rxq->rss_hash * PKT_RX_RSS_HASH |
- rxq->hw_timestamp * PKT_RX_TIMESTAMP};
+ rxq->hw_timestamp * rxq->timestamp_rx_flag};
vector unsigned char cv_flags;
const vector unsigned char zero = (vector unsigned char){0};
const vector unsigned char ptype_mask =
@@ -1025,31 +1025,32 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
/* D.5 fill in mbuf - rearm_data and packet_type. */
rxq_cq_to_ptype_oflags_v(rxq, cqes, opcode, &pkts[pos]);
if (rxq->hw_timestamp) {
+ int offset = rxq->timestamp_offset;
if (rxq->rt_timestamp) {
struct mlx5_dev_ctx_shared *sh = rxq->sh;
uint64_t ts;
ts = rte_be_to_cpu_64(cq[pos].timestamp);
- pkts[pos]->timestamp =
- mlx5_txpp_convert_rx_ts(sh, ts);
+ mlx5_timestamp_set(pkts[pos], offset,
+ mlx5_txpp_convert_rx_ts(sh, ts));
ts = rte_be_to_cpu_64(cq[pos + p1].timestamp);
- pkts[pos + 1]->timestamp =
- mlx5_txpp_convert_rx_ts(sh, ts);
+ mlx5_timestamp_set(pkts[pos + 1], offset,
+ mlx5_txpp_convert_rx_ts(sh, ts));
ts = rte_be_to_cpu_64(cq[pos + p2].timestamp);
- pkts[pos + 2]->timestamp =
- mlx5_txpp_convert_rx_ts(sh, ts);
+ mlx5_timestamp_set(pkts[pos + 2], offset,
+ mlx5_txpp_convert_rx_ts(sh, ts));
ts = rte_be_to_cpu_64(cq[pos + p3].timestamp);
- pkts[pos + 3]->timestamp =
- mlx5_txpp_convert_rx_ts(sh, ts);
+ mlx5_timestamp_set(pkts[pos + 3], offset,
+ mlx5_txpp_convert_rx_ts(sh, ts));
} else {
- pkts[pos]->timestamp = rte_be_to_cpu_64
- (cq[pos].timestamp);
- pkts[pos + 1]->timestamp = rte_be_to_cpu_64
- (cq[pos + p1].timestamp);
- pkts[pos + 2]->timestamp = rte_be_to_cpu_64
- (cq[pos + p2].timestamp);
- pkts[pos + 3]->timestamp = rte_be_to_cpu_64
- (cq[pos + p3].timestamp);
+ mlx5_timestamp_set(pkts[pos], offset,
+ rte_be_to_cpu_64(cq[pos].timestamp));
+ mlx5_timestamp_set(pkts[pos + 1], offset,
+ rte_be_to_cpu_64(cq[pos + p1].timestamp));
+ mlx5_timestamp_set(pkts[pos + 2], offset,
+ rte_be_to_cpu_64(cq[pos + p2].timestamp));
+ mlx5_timestamp_set(pkts[pos + 3], offset,
+ rte_be_to_cpu_64(cq[pos + p3].timestamp));
}
}
if (rxq->dynf_meta) {
@@ -271,7 +271,7 @@ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq,
uint32x4_t pinfo, cv_flags;
uint32x4_t ol_flags =
vdupq_n_u32(rxq->rss_hash * PKT_RX_RSS_HASH |
- rxq->hw_timestamp * PKT_RX_TIMESTAMP);
+ rxq->hw_timestamp * rxq->timestamp_rx_flag);
const uint32x4_t ptype_ol_mask = { 0x106, 0x106, 0x106, 0x106 };
const uint8x16_t cv_flag_sel = {
0,
@@ -697,6 +697,7 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
rxq_cq_to_ptype_oflags_v(rxq, ptype_info, flow_tag,
opcode, &elts[pos]);
if (rxq->hw_timestamp) {
+ int offset = rxq->timestamp_offset;
if (rxq->rt_timestamp) {
struct mlx5_dev_ctx_shared *sh = rxq->sh;
uint64_t ts;
@@ -704,36 +705,36 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
ts = rte_be_to_cpu_64
(container_of(p0, struct mlx5_cqe,
pkt_info)->timestamp);
- elts[pos]->timestamp =
- mlx5_txpp_convert_rx_ts(sh, ts);
+ mlx5_timestamp_set(elts[pos], offset,
+ mlx5_txpp_convert_rx_ts(sh, ts));
ts = rte_be_to_cpu_64
(container_of(p1, struct mlx5_cqe,
pkt_info)->timestamp);
- elts[pos + 1]->timestamp =
- mlx5_txpp_convert_rx_ts(sh, ts);
+ mlx5_timestamp_set(elts[pos + 1], offset,
+ mlx5_txpp_convert_rx_ts(sh, ts));
ts = rte_be_to_cpu_64
(container_of(p2, struct mlx5_cqe,
pkt_info)->timestamp);
- elts[pos + 2]->timestamp =
- mlx5_txpp_convert_rx_ts(sh, ts);
+ mlx5_timestamp_set(elts[pos + 2], offset,
+ mlx5_txpp_convert_rx_ts(sh, ts));
ts = rte_be_to_cpu_64
(container_of(p3, struct mlx5_cqe,
pkt_info)->timestamp);
- elts[pos + 3]->timestamp =
- mlx5_txpp_convert_rx_ts(sh, ts);
+ mlx5_timestamp_set(elts[pos + 3], offset,
+ mlx5_txpp_convert_rx_ts(sh, ts));
} else {
- elts[pos]->timestamp = rte_be_to_cpu_64
- (container_of(p0, struct mlx5_cqe,
- pkt_info)->timestamp);
- elts[pos + 1]->timestamp = rte_be_to_cpu_64
- (container_of(p1, struct mlx5_cqe,
- pkt_info)->timestamp);
- elts[pos + 2]->timestamp = rte_be_to_cpu_64
- (container_of(p2, struct mlx5_cqe,
- pkt_info)->timestamp);
- elts[pos + 3]->timestamp = rte_be_to_cpu_64
- (container_of(p3, struct mlx5_cqe,
- pkt_info)->timestamp);
+ mlx5_timestamp_set(elts[pos], offset,
+ rte_be_to_cpu_64(container_of(p0,
+ struct mlx5_cqe, pkt_info)->timestamp));
+ mlx5_timestamp_set(elts[pos + 1], offset,
+ rte_be_to_cpu_64(container_of(p1,
+ struct mlx5_cqe, pkt_info)->timestamp));
+ mlx5_timestamp_set(elts[pos + 2], offset,
+ rte_be_to_cpu_64(container_of(p2,
+ struct mlx5_cqe, pkt_info)->timestamp));
+ mlx5_timestamp_set(elts[pos + 3], offset,
+ rte_be_to_cpu_64(container_of(p3,
+ struct mlx5_cqe, pkt_info)->timestamp));
}
}
if (rxq->dynf_meta) {
@@ -251,7 +251,7 @@ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq, __m128i cqes[4],
__m128i pinfo0, pinfo1;
__m128i pinfo, ptype;
__m128i ol_flags = _mm_set1_epi32(rxq->rss_hash * PKT_RX_RSS_HASH |
- rxq->hw_timestamp * PKT_RX_TIMESTAMP);
+ rxq->hw_timestamp * rxq->timestamp_rx_flag);
__m128i cv_flags;
const __m128i zero = _mm_setzero_si128();
const __m128i ptype_mask =
@@ -656,31 +656,32 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
/* D.5 fill in mbuf - rearm_data and packet_type. */
rxq_cq_to_ptype_oflags_v(rxq, cqes, opcode, &pkts[pos]);
if (rxq->hw_timestamp) {
+ int offset = rxq->timestamp_offset;
if (rxq->rt_timestamp) {
struct mlx5_dev_ctx_shared *sh = rxq->sh;
uint64_t ts;
ts = rte_be_to_cpu_64(cq[pos].timestamp);
- pkts[pos]->timestamp =
- mlx5_txpp_convert_rx_ts(sh, ts);
+ mlx5_timestamp_set(pkts[pos], offset,
+ mlx5_txpp_convert_rx_ts(sh, ts));
ts = rte_be_to_cpu_64(cq[pos + p1].timestamp);
- pkts[pos + 1]->timestamp =
- mlx5_txpp_convert_rx_ts(sh, ts);
+ mlx5_timestamp_set(pkts[pos + 1], offset,
+ mlx5_txpp_convert_rx_ts(sh, ts));
ts = rte_be_to_cpu_64(cq[pos + p2].timestamp);
- pkts[pos + 2]->timestamp =
- mlx5_txpp_convert_rx_ts(sh, ts);
+ mlx5_timestamp_set(pkts[pos + 2], offset,
+ mlx5_txpp_convert_rx_ts(sh, ts));
ts = rte_be_to_cpu_64(cq[pos + p3].timestamp);
- pkts[pos + 3]->timestamp =
- mlx5_txpp_convert_rx_ts(sh, ts);
+ mlx5_timestamp_set(pkts[pos + 3], offset,
+ mlx5_txpp_convert_rx_ts(sh, ts));
} else {
- pkts[pos]->timestamp = rte_be_to_cpu_64
- (cq[pos].timestamp);
- pkts[pos + 1]->timestamp = rte_be_to_cpu_64
- (cq[pos + p1].timestamp);
- pkts[pos + 2]->timestamp = rte_be_to_cpu_64
- (cq[pos + p2].timestamp);
- pkts[pos + 3]->timestamp = rte_be_to_cpu_64
- (cq[pos + p3].timestamp);
+ mlx5_timestamp_set(pkts[pos], offset,
+ rte_be_to_cpu_64(cq[pos].timestamp));
+ mlx5_timestamp_set(pkts[pos + 1], offset,
+ rte_be_to_cpu_64(cq[pos + p1].timestamp));
+ mlx5_timestamp_set(pkts[pos + 2], offset,
+ rte_be_to_cpu_64(cq[pos + p2].timestamp));
+ mlx5_timestamp_set(pkts[pos + 3], offset,
+ rte_be_to_cpu_64(cq[pos + p3].timestamp));
}
}
if (rxq->dynf_meta) {