@@ -1002,7 +1002,7 @@ init_op_data_objs(struct rte_bbdev_op_data *bufs,
seg->length);
memcpy(data, seg->addr, seg->length);
m_head->buf_addr = data;
- m_head->buf_iova = rte_malloc_virt2iova(data);
+ rte_mbuf_iova_set(m_head, rte_malloc_virt2iova(data));
m_head->data_off = 0;
m_head->data_len = seg->length;
} else {
@@ -26,8 +26,7 @@ fill_single_seg_mbuf(struct rte_mbuf *m, struct rte_mempool *mp,
/* start of buffer is after mbuf structure and priv data */
m->priv_size = 0;
m->buf_addr = (char *)m + mbuf_hdr_size;
- m->buf_iova = rte_mempool_virt2iova(obj) +
- mbuf_offset + mbuf_hdr_size;
+ rte_mbuf_iova_set(m, rte_mempool_virt2iova(obj) + mbuf_offset + mbuf_hdr_size);
m->buf_len = segment_sz;
m->data_len = data_len;
m->pkt_len = data_len;
@@ -58,7 +57,7 @@ fill_multi_seg_mbuf(struct rte_mbuf *m, struct rte_mempool *mp,
/* start of buffer is after mbuf structure and priv data */
m->priv_size = 0;
m->buf_addr = (char *)m + mbuf_hdr_size;
- m->buf_iova = next_seg_phys_addr;
+ rte_mbuf_iova_set(m, next_seg_phys_addr);
next_seg_phys_addr += mbuf_hdr_size + segment_sz;
m->buf_len = segment_sz;
m->data_len = data_len;
@@ -2600,7 +2600,7 @@ dummy_mbuf_prep(struct rte_mbuf *mb, uint8_t buf[], uint32_t buf_len,
uint8_t *db;
mb->buf_addr = buf;
- mb->buf_iova = (uintptr_t)buf;
+ rte_mbuf_iova_set(mb, (uintptr_t)buf);
mb->buf_len = buf_len;
rte_mbuf_refcnt_set(mb, 1);
@@ -1233,7 +1233,7 @@ test_failing_mbuf_sanity_check(struct rte_mempool *pktmbuf_pool)
}
badbuf = *buf;
- badbuf.buf_iova = 0;
+ rte_mbuf_iova_set(&badbuf, 0);
if (verify_mbuf_check_panics(&badbuf)) {
printf("Error with bad-physaddr mbuf test\n");
return -1;
@@ -40,7 +40,7 @@ dummy_mbuf_prep(struct rte_mbuf *mb, uint8_t buf[], uint32_t buf_len,
uint8_t *db;
mb->buf_addr = buf;
- mb->buf_iova = (uintptr_t)buf;
+ rte_mbuf_iova_set(mb, (uintptr_t)buf);
mb->buf_len = buf_len;
rte_mbuf_refcnt_set(mb, 1);
@@ -357,8 +357,7 @@ static void *
va2pa(struct rte_mbuf *m)
{
return (void *)((unsigned long)m -
- ((unsigned long)m->buf_addr -
- (unsigned long)m->buf_iova));
+ ((unsigned long)m->buf_addr - (unsigned long)rte_mbuf_iova_get(m)));
}
static void *
@@ -89,7 +89,7 @@ rte_pktmbuf_init(struct rte_mempool *mp,
/* start of buffer is after mbuf structure and priv data */
m->priv_size = priv_size;
m->buf_addr = (char *)m + mbuf_size;
- m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
+ rte_mbuf_iova_set(m, rte_mempool_virt2iova(m) + mbuf_size);
m->buf_len = (uint16_t)buf_len;
/* keep some headroom between start of buffer and data */
@@ -187,8 +187,8 @@ __rte_pktmbuf_init_extmem(struct rte_mempool *mp,
RTE_ASSERT(ctx->off + ext_mem->elt_size <= ext_mem->buf_len);
m->buf_addr = RTE_PTR_ADD(ext_mem->buf_ptr, ctx->off);
- m->buf_iova = ext_mem->buf_iova == RTE_BAD_IOVA ?
- RTE_BAD_IOVA : (ext_mem->buf_iova + ctx->off);
+ rte_mbuf_iova_set(m, ext_mem->buf_iova == RTE_BAD_IOVA ? RTE_BAD_IOVA :
+ (ext_mem->buf_iova + ctx->off));
ctx->off += ext_mem->elt_size;
if (ctx->off + ext_mem->elt_size > ext_mem->buf_len) {
@@ -388,7 +388,7 @@ int rte_mbuf_check(const struct rte_mbuf *m, int is_header,
*reason = "bad mbuf pool";
return -1;
}
- if (m->buf_iova == 0) {
+ if (rte_mbuf_iova_get(m) == 0) {
*reason = "bad IO addr";
return -1;
}
@@ -669,8 +669,8 @@ rte_pktmbuf_dump(FILE *f, const struct rte_mbuf *m, unsigned dump_len)
__rte_mbuf_sanity_check(m, 1);
- fprintf(f, "dump mbuf at %p, iova=%#"PRIx64", buf_len=%u\n",
- m, m->buf_iova, m->buf_len);
+ fprintf(f, "dump mbuf at %p, iova=%#" PRIx64 ", buf_len=%u\n", m, rte_mbuf_iova_get(m),
+ m->buf_len);
fprintf(f, " pkt_len=%u, ol_flags=%#"PRIx64", nb_segs=%u, port=%u",
m->pkt_len, m->ol_flags, m->nb_segs, m->port);
@@ -135,6 +135,34 @@ rte_mbuf_prefetch_part2(struct rte_mbuf *m)
static inline uint16_t rte_pktmbuf_priv_size(struct rte_mempool *mp);
+/**
+ * Get the IOVA address of the mbuf data buffer.
+ *
+ * @param m
+ * The pointer to the mbuf.
+ * @return
+ *   The IOVA address of the mbuf data buffer.
+ */
+static inline rte_iova_t
+rte_mbuf_iova_get(const struct rte_mbuf *m)
+{
+ return m->buf_iova;
+}
+
+/**
+ * Set the IOVA address of the mbuf data buffer.
+ *
+ * @param m
+ * The pointer to the mbuf.
+ * @param iova
+ * Value to set as IOVA address of the mbuf.
+ */
+static inline void
+rte_mbuf_iova_set(struct rte_mbuf *m, rte_iova_t iova)
+{
+ m->buf_iova = iova;
+}
+
/**
* Return the IO address of the beginning of the mbuf data
*
@@ -146,7 +174,7 @@ static inline uint16_t rte_pktmbuf_priv_size(struct rte_mempool *mp);
static inline rte_iova_t
rte_mbuf_data_iova(const struct rte_mbuf *mb)
{
- return mb->buf_iova + mb->data_off;
+ return rte_mbuf_iova_get(mb) + mb->data_off;
}
/**
@@ -164,7 +192,7 @@ rte_mbuf_data_iova(const struct rte_mbuf *mb)
static inline rte_iova_t
rte_mbuf_data_iova_default(const struct rte_mbuf *mb)
{
- return mb->buf_iova + RTE_PKTMBUF_HEADROOM;
+ return rte_mbuf_iova_get(mb) + RTE_PKTMBUF_HEADROOM;
}
/**
@@ -1053,7 +1081,7 @@ rte_pktmbuf_attach_extbuf(struct rte_mbuf *m, void *buf_addr,
RTE_ASSERT(shinfo->free_cb != NULL);
m->buf_addr = buf_addr;
- m->buf_iova = buf_iova;
+ rte_mbuf_iova_set(m, buf_iova);
m->buf_len = buf_len;
m->data_len = 0;
@@ -1140,7 +1168,7 @@ static inline void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *m)
mi->data_off = m->data_off;
mi->data_len = m->data_len;
- mi->buf_iova = m->buf_iova;
+ rte_mbuf_iova_set(mi, rte_mbuf_iova_get(m));
mi->buf_addr = m->buf_addr;
mi->buf_len = m->buf_len;
@@ -1242,7 +1270,7 @@ static inline void rte_pktmbuf_detach(struct rte_mbuf *m)
m->priv_size = priv_size;
m->buf_addr = (char *)m + mbuf_size;
- m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
+ rte_mbuf_iova_set(m, rte_mempool_virt2iova(m) + mbuf_size);
m->buf_len = (uint16_t)buf_len;
rte_pktmbuf_reset_headroom(m);
m->data_len = 0;
@@ -736,8 +736,7 @@ struct rte_mbuf_ext_shared_info {
* @param o
* The offset into the data to calculate address from.
*/
-#define rte_pktmbuf_iova_offset(m, o) \
- (rte_iova_t)((m)->buf_iova + (m)->data_off + (o))
+#define rte_pktmbuf_iova_offset(m, o) (rte_iova_t)(rte_mbuf_iova_get(m) + (m)->data_off + (o))
/**
* A macro that returns the IO address that points to the start of the
@@ -1923,7 +1923,7 @@ pkt_work_sym_crypto(struct rte_mbuf *mbuf, struct sym_crypto_data *data,
op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
- op->phys_addr = mbuf->buf_iova + cfg->op_offset - sizeof(*mbuf);
+ op->phys_addr = rte_mbuf_iova_get(mbuf) + cfg->op_offset - sizeof(*mbuf);
op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
sym->m_src = mbuf;
sym->m_dst = NULL;
@@ -971,7 +971,7 @@ restore_mbuf(struct rte_mbuf *m)
/* start of buffer is after mbuf structure and priv data */
m->buf_addr = (char *)m + mbuf_size;
- m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
+ rte_mbuf_iova_set(m, rte_mempool_virt2iova(m) + mbuf_size);
m = m->next;
}
}
@@ -807,11 +807,10 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
switch (vcrypto->option) {
case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
m_src->data_len = cipher->para.src_data_len;
- m_src->buf_iova = gpa_to_hpa(vcrypto->dev, desc->addr,
- cipher->para.src_data_len);
+ rte_mbuf_iova_set(m_src,
+ gpa_to_hpa(vcrypto->dev, desc->addr, cipher->para.src_data_len));
m_src->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RO);
- if (unlikely(m_src->buf_iova == 0 ||
- m_src->buf_addr == NULL)) {
+ if (unlikely(rte_mbuf_iova_get(m_src) == 0 || m_src->buf_addr == NULL)) {
VC_LOG_ERR("zero_copy may fail due to cross page data");
ret = VIRTIO_CRYPTO_ERR;
goto error_exit;
@@ -851,10 +850,10 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
switch (vcrypto->option) {
case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
- m_dst->buf_iova = gpa_to_hpa(vcrypto->dev,
- desc->addr, cipher->para.dst_data_len);
+ rte_mbuf_iova_set(m_dst,
+ gpa_to_hpa(vcrypto->dev, desc->addr, cipher->para.dst_data_len));
m_dst->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RW);
- if (unlikely(m_dst->buf_iova == 0 || m_dst->buf_addr == NULL)) {
+ if (unlikely(rte_mbuf_iova_get(m_dst) == 0 || m_dst->buf_addr == NULL)) {
VC_LOG_ERR("zero_copy may fail due to cross page data");
ret = VIRTIO_CRYPTO_ERR;
goto error_exit;
@@ -965,10 +964,10 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
m_src->data_len = chain->para.src_data_len;
m_dst->data_len = chain->para.dst_data_len;
- m_src->buf_iova = gpa_to_hpa(vcrypto->dev, desc->addr,
- chain->para.src_data_len);
+ rte_mbuf_iova_set(m_src,
+ gpa_to_hpa(vcrypto->dev, desc->addr, chain->para.src_data_len));
m_src->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RO);
- if (unlikely(m_src->buf_iova == 0 || m_src->buf_addr == NULL)) {
+ if (unlikely(rte_mbuf_iova_get(m_src) == 0 || m_src->buf_addr == NULL)) {
VC_LOG_ERR("zero_copy may fail due to cross page data");
ret = VIRTIO_CRYPTO_ERR;
goto error_exit;
@@ -1008,10 +1007,10 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
switch (vcrypto->option) {
case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
- m_dst->buf_iova = gpa_to_hpa(vcrypto->dev,
- desc->addr, chain->para.dst_data_len);
+ rte_mbuf_iova_set(m_dst,
+ gpa_to_hpa(vcrypto->dev, desc->addr, chain->para.dst_data_len));
m_dst->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RW);
- if (unlikely(m_dst->buf_iova == 0 || m_dst->buf_addr == NULL)) {
+ if (unlikely(rte_mbuf_iova_get(m_dst) == 0 || m_dst->buf_addr == NULL)) {
VC_LOG_ERR("zero_copy may fail due to cross page data");
ret = VIRTIO_CRYPTO_ERR;
goto error_exit;