[v6] ip_frag: add IPv4 fragment copy packet API
Commit Message
Some NIC drivers support the MBUF_FAST_FREE offload (the device supports
optimization for fast release of mbufs; when set, the application must
guarantee that, per queue, all mbufs come from the same mempool, have
refcnt = 1, and are direct and non-segmented). In order to adapt to this
offload, add this API, along with some test data for it.
Signed-off-by: Huichao Cai <chcchc88@163.com>
---
app/test/test_ipfrag.c | 9 +-
lib/ip_frag/rte_ip_frag.h | 34 +++++++
lib/ip_frag/rte_ipv4_fragmentation.c | 175 +++++++++++++++++++++++++++++++++++
lib/ip_frag/version.map | 1 +
4 files changed, 218 insertions(+), 1 deletion(-)
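To make the intended use concrete, below is a minimal sketch of how an
application might call the new API (the helper name, the mempool and the
caller-frees-input convention are illustrative assumptions, not taken from
the patch):

#include <rte_ip_frag.h>
#include <rte_mbuf.h>

/* Sketch: fragment an IPv4 packet into fresh direct mbufs so a TX queue
 * configured with RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE can free them on the
 * fast path. The zero-copy rte_ipv4_fragment_packet() instead emits
 * indirect payload mbufs, which break that offload's contract. */
static int32_t
fragment_ipv4_for_fast_free(struct rte_mbuf *pkt, struct rte_mempool *pool,
		uint16_t mtu, struct rte_mbuf *frags[], uint16_t nb_frags_max)
{
	/* pkt must start at the IPv4 header (L2 header already stripped). */
	int32_t nb_frags = rte_ipv4_fragment_copy_nonseg_packet(pkt, frags,
			nb_frags_max, mtu, pool);

	/* The input is fully copied, so it can be freed either way; on
	 * error the API has already freed any partially built fragments. */
	rte_pktmbuf_free(pkt);

	/* Each fragment still needs an L2 header prepended before
	 * rte_eth_tx_burst() (omitted here). */
	return nb_frags;
}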
Comments
On Sun, 24 Jul 2022 16:10:03 +0800
Huichao Cai <chcchc88@163.com> wrote:
> +
> + /*
> + * Formal parameter checking.
> + */
> + if (unlikely(pkt_in == NULL) || unlikely(pkts_out == NULL) ||
> + unlikely(nb_pkts_out == 0) || unlikely(pool_direct == NULL) ||
> + unlikely(mtu_size < RTE_ETHER_MIN_MTU))
> + return -EINVAL;
> +
> + in_hdr = rte_pktmbuf_mtod(pkt_in, struct rte_ipv4_hdr *);
> + header_len = (in_hdr->version_ihl & RTE_IPV4_HDR_IHL_MASK) *
> + RTE_IPV4_IHL_MULTIPLIER;
> +
> + /* Check IP header length */
> + if (unlikely(pkt_in->data_len < header_len) ||
> + unlikely(mtu_size < header_len))
> + return -EINVAL;
> +
My suspicion is that all this input parameter checking probably costs more
than any performance gain from having a non-segmented fast path.
At 2022-07-25 23:42:06, "Stephen Hemminger" <stephen@networkplumber.org> wrote:
>On Sun, 24 Jul 2022 16:10:03 +0800
>Huichao Cai <chcchc88@163.com> wrote:
>
>> +
>> + /*
>> + * Formal parameter checking.
>> + */
>> + if (unlikely(pkt_in == NULL) || unlikely(pkts_out == NULL) ||
>> + unlikely(nb_pkts_out == 0) || unlikely(pool_direct == NULL) ||
>> + unlikely(mtu_size < RTE_ETHER_MIN_MTU))
>> + return -EINVAL;
>> +
>> + in_hdr = rte_pktmbuf_mtod(pkt_in, struct rte_ipv4_hdr *);
>> + header_len = (in_hdr->version_ihl & RTE_IPV4_HDR_IHL_MASK) *
>> + RTE_IPV4_IHL_MULTIPLIER;
>> +
>> + /* Check IP header length */
>> + if (unlikely(pkt_in->data_len < header_len) ||
>> + unlikely(mtu_size < header_len))
>> + return -EINVAL;
>> +
>
>My suspicion is that all this input parameter checking probably costs more
>than any performance gain from having a non-segmented fast path.
These checks are consistent with the rte_ipv4_fragment_packet function.
I think these have been tested for performance. If these checks do affect
performance, perhaps the validity of the arguments is better guaranteed
by the caller.
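One common DPDK pattern for the caller-side guarantee mentioned above is to
keep the checks but compile them out of release builds; a hypothetical
sketch (the helper is illustrative, not proposed in this thread):

#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_mbuf.h>

/* Sketch: the same sanity checks as debug-only asserts. RTE_ASSERT()
 * expands to nothing unless RTE_ENABLE_ASSERT is defined, so release
 * builds pay nothing and the contract moves to the caller. */
static inline void
ipv4_frag_args_assert(const struct rte_mbuf *pkt_in,
		struct rte_mbuf * const *pkts_out, uint16_t nb_pkts_out,
		uint16_t mtu_size, const struct rte_mempool *pool_direct)
{
	RTE_ASSERT(pkt_in != NULL && pkts_out != NULL && pool_direct != NULL);
	RTE_ASSERT(nb_pkts_out != 0);
	RTE_ASSERT(mtu_size >= RTE_ETHER_MIN_MTU);
}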
24/07/2022 09:10, Huichao Cai wrote:
> Some NIC drivers support the MBUF_FAST_FREE offload (the device supports
> optimization for fast release of mbufs; when set, the application must
> guarantee that, per queue, all mbufs come from the same mempool, have
> refcnt = 1, and are direct and non-segmented). In order to adapt to this
> offload, add this API, along with some test data for it.
>
> Signed-off-by: Huichao Cai <chcchc88@163.com>
> ---
> app/test/test_ipfrag.c | 9 +-
> lib/ip_frag/rte_ip_frag.h | 34 +++++++
> lib/ip_frag/rte_ipv4_fragmentation.c | 175 +++++++++++++++++++++++++++++++++++
> lib/ip_frag/version.map | 1 +
> 4 files changed, 218 insertions(+), 1 deletion(-)
>
> diff --git a/app/test/test_ipfrag.c b/app/test/test_ipfrag.c
> index ba0ffd0..88cc4cd 100644
> --- a/app/test/test_ipfrag.c
> +++ b/app/test/test_ipfrag.c
> @@ -418,10 +418,17 @@ static void ut_teardown(void)
> }
>
> if (tests[i].ipv == 4)
> - len = rte_ipv4_fragment_packet(b, pkts_out, BURST,
> + if (i % 2)
> + len = rte_ipv4_fragment_packet(b, pkts_out, BURST,
> tests[i].mtu_size,
> direct_pool,
> indirect_pool);
> + else
> + len = rte_ipv4_fragment_copy_nonseg_packet(b,
> + pkts_out,
> + BURST,
> + tests[i].mtu_size,
> + direct_pool);
> else if (tests[i].ipv == 6)
> len = rte_ipv6_fragment_packet(b, pkts_out, BURST,
> tests[i].mtu_size,
> diff --git a/lib/ip_frag/rte_ip_frag.h b/lib/ip_frag/rte_ip_frag.h
> index 7d2abe1..4a2b150 100644
> --- a/lib/ip_frag/rte_ip_frag.h
> +++ b/lib/ip_frag/rte_ip_frag.h
> @@ -179,6 +179,40 @@ int32_t rte_ipv4_fragment_packet(struct rte_mbuf *pkt_in,
> struct rte_mempool *pool_indirect);
>
> /**
> + * IPv4 fragmentation by copy.
> + *
> + * This function implements fragmentation of IPv4 packets by copying
> + * them into new, non-segmented mbufs.
> + * It is mainly intended for use with the TX MBUF_FAST_FREE offload
> + * (the device supports optimization for fast release of mbufs; when set,
> + * the application must guarantee that, per queue, all mbufs come from
> + * the same mempool, have refcnt = 1, and are direct and non-segmented).
> + *
> + * @param pkt_in
> + * The input packet.
> + * @param pkts_out
> + * Array storing the output fragments.
> + * @param nb_pkts_out
> + * Number of fragments.
> + * @param mtu_size
> + * Size in bytes of the Maximum Transfer Unit (MTU) for the outgoing IPv4
> + * datagrams. This value includes the size of the IPv4 header.
> + * @param pool_direct
> + * MBUF pool used for allocating direct buffers for the output fragments.
> + * @return
> + * Upon successful completion - number of output fragments placed
> + * in the pkts_out array.
> + * Otherwise - (-1) * errno.
> + */
> +__rte_experimental
> +int32_t
> +rte_ipv4_fragment_copy_nonseg_packet(struct rte_mbuf *pkt_in,
> + struct rte_mbuf **pkts_out,
> + uint16_t nb_pkts_out,
> + uint16_t mtu_size,
> + struct rte_mempool *pool_direct);
> +
> +/**
> * This function implements reassembly of fragmented IPv4 packets.
> * Incoming mbufs should have its l2_len/l3_len fields setup correctly.
> *
> diff --git a/lib/ip_frag/rte_ipv4_fragmentation.c b/lib/ip_frag/rte_ipv4_fragmentation.c
> index 27a8ad2..e6ec408 100644
> --- a/lib/ip_frag/rte_ipv4_fragmentation.c
> +++ b/lib/ip_frag/rte_ipv4_fragmentation.c
> @@ -259,3 +259,178 @@ static inline uint16_t __create_ipopt_frag_hdr(uint8_t *iph,
>
> return out_pkt_pos;
> }
> +
> +/**
> + * IPv4 fragmentation by copy.
> + *
> + * This function implements fragmentation of IPv4 packets by copying
> + * them into new, non-segmented mbufs.
> + * It is mainly intended for use with the TX MBUF_FAST_FREE offload
> + * (the device supports optimization for fast release of mbufs; when set,
> + * the application must guarantee that, per queue, all mbufs come from
> + * the same mempool, have refcnt = 1, and are direct and non-segmented).
> + *
> + * @param pkt_in
> + * The input packet.
> + * @param pkts_out
> + * Array storing the output fragments.
> + * @param nb_pkts_out
> + * Number of fragments.
> + * @param mtu_size
> + * Size in bytes of the Maximum Transfer Unit (MTU) for the outgoing IPv4
> + * datagrams. This value includes the size of the IPv4 header.
> + * @param pool_direct
> + * MBUF pool used for allocating direct buffers for the output fragments.
> + * @return
> + * Upon successful completion - number of output fragments placed
> + * in the pkts_out array.
> + * Otherwise - (-1) * errno.
> + */
> +int32_t
> +rte_ipv4_fragment_copy_nonseg_packet(struct rte_mbuf *pkt_in,
> + struct rte_mbuf **pkts_out,
> + uint16_t nb_pkts_out,
> + uint16_t mtu_size,
> + struct rte_mempool *pool_direct)
> +{
> + struct rte_mbuf *in_seg = NULL;
> + struct rte_ipv4_hdr *in_hdr;
> + uint32_t out_pkt_pos, in_seg_data_pos;
> + uint32_t more_in_segs;
> + uint16_t fragment_offset, flag_offset, frag_size, header_len;
> + uint16_t frag_bytes_remaining;
> + uint8_t ipopt_frag_hdr[IPV4_HDR_MAX_LEN];
> + uint16_t ipopt_len;
> +
> + /*
> + * Formal parameter checking.
> + */
> + if (unlikely(pkt_in == NULL) || unlikely(pkts_out == NULL) ||
> + unlikely(nb_pkts_out == 0) || unlikely(pool_direct == NULL) ||
> + unlikely(mtu_size < RTE_ETHER_MIN_MTU))
> + return -EINVAL;
> +
> + in_hdr = rte_pktmbuf_mtod(pkt_in, struct rte_ipv4_hdr *);
> + header_len = (in_hdr->version_ihl & RTE_IPV4_HDR_IHL_MASK) *
> + RTE_IPV4_IHL_MULTIPLIER;
> +
> + /* Check IP header length */
> + if (unlikely(pkt_in->data_len < header_len) ||
> + unlikely(mtu_size < header_len))
> + return -EINVAL;
> +
> + /*
> + * Ensure the IP payload length of all fragments is aligned to a
> + * multiple of 8 bytes as per RFC791 section 2.3.
> + */
> + frag_size = RTE_ALIGN_FLOOR((mtu_size - header_len),
> + IPV4_HDR_FO_ALIGN);
> +
> + flag_offset = rte_cpu_to_be_16(in_hdr->fragment_offset);
> +
> + /* If Don't Fragment flag is set */
> + if (unlikely((flag_offset & IPV4_HDR_DF_MASK) != 0))
> + return -ENOTSUP;
> +
> + /* Check that pkts_out is big enough to hold all fragments */
> + if (unlikely(frag_size * nb_pkts_out <
> + (uint16_t)(pkt_in->pkt_len - header_len)))
> + return -EINVAL;
> +
> + in_seg = pkt_in;
> + in_seg_data_pos = header_len;
> + out_pkt_pos = 0;
> + fragment_offset = 0;
> +
> + ipopt_len = header_len - sizeof(struct rte_ipv4_hdr);
> + if (unlikely(ipopt_len > RTE_IPV4_HDR_OPT_MAX_LEN))
> + return -EINVAL;
> +
> + more_in_segs = 1;
> + while (likely(more_in_segs)) {
> + struct rte_mbuf *out_pkt = NULL;
> + uint32_t more_out_segs;
> + struct rte_ipv4_hdr *out_hdr;
> +
> + /* Allocate direct buffer */
> + out_pkt = rte_pktmbuf_alloc(pool_direct);
> + if (unlikely(out_pkt == NULL)) {
> + __free_fragments(pkts_out, out_pkt_pos);
> + return -ENOMEM;
> + }
> + if (unlikely(out_pkt->buf_len - rte_pktmbuf_headroom(out_pkt) <
> + frag_size)) {
As a nit, this might be better (see the expanded sketch after the patch quote):
if (rte_pktmbuf_tailroom(out_pkt) < frag_size) {...}
Acked-by: Konstantin Ananyev <konstantin.v.ananyev@yandex.ru>
> + rte_pktmbuf_free(out_pkt);
> + __free_fragments(pkts_out, out_pkt_pos);
> + return -EINVAL;
> + }
> +
> + /* Reserve space for the IP header that will be built later */
> + out_pkt->data_len = header_len;
> + out_pkt->pkt_len = header_len;
> + frag_bytes_remaining = frag_size;
> +
> + more_out_segs = 1;
> + while (likely(more_out_segs && more_in_segs)) {
> + uint32_t len;
> +
> + len = frag_bytes_remaining;
> + if (len > (in_seg->data_len - in_seg_data_pos))
> + len = in_seg->data_len - in_seg_data_pos;
> +
> + memcpy(rte_pktmbuf_mtod_offset(out_pkt, char *,
> + out_pkt->data_len),
> + rte_pktmbuf_mtod_offset(in_seg, char *,
> + in_seg_data_pos),
> + len);
> +
> + in_seg_data_pos += len;
> + frag_bytes_remaining -= len;
> + out_pkt->data_len += len;
> +
> + /* Current output packet (i.e. fragment) done ? */
> + if (unlikely(frag_bytes_remaining == 0))
> + more_out_segs = 0;
> +
> + /* Current input segment done ? */
> + if (unlikely(in_seg_data_pos == in_seg->data_len)) {
> + in_seg = in_seg->next;
> + in_seg_data_pos = 0;
> +
> + if (unlikely(in_seg == NULL))
> + more_in_segs = 0;
> + }
> + }
> +
> + /* Build the IP header */
> +
> + out_pkt->pkt_len = out_pkt->data_len;
> + out_hdr = rte_pktmbuf_mtod(out_pkt, struct rte_ipv4_hdr *);
> +
> + __fill_ipv4hdr_frag(out_hdr, in_hdr, header_len,
> + (uint16_t)out_pkt->pkt_len,
> + flag_offset, fragment_offset, more_in_segs);
> +
> + if (unlikely((fragment_offset == 0) && (ipopt_len) &&
> + ((flag_offset & RTE_IPV4_HDR_OFFSET_MASK) == 0))) {
> + ipopt_len = __create_ipopt_frag_hdr((uint8_t *)in_hdr,
> + ipopt_len, ipopt_frag_hdr);
> + fragment_offset = (uint16_t)(fragment_offset +
> + out_pkt->pkt_len - header_len);
> + out_pkt->l3_len = header_len;
> +
> + header_len = sizeof(struct rte_ipv4_hdr) + ipopt_len;
> + in_hdr = (struct rte_ipv4_hdr *)ipopt_frag_hdr;
> + } else {
> + fragment_offset = (uint16_t)(fragment_offset +
> + out_pkt->pkt_len - header_len);
> + out_pkt->l3_len = header_len;
> + }
> +
> + /* Write the fragment to the output list */
> + pkts_out[out_pkt_pos] = out_pkt;
> + out_pkt_pos++;
> + }
> +
> + return out_pkt_pos;
> +}
> diff --git a/lib/ip_frag/version.map b/lib/ip_frag/version.map
> index b9c1cca..8aad839 100644
> --- a/lib/ip_frag/version.map
> +++ b/lib/ip_frag/version.map
> @@ -17,4 +17,5 @@ EXPERIMENTAL {
> global:
>
> rte_ip_frag_table_del_expired_entries;
> + rte_ipv4_fragment_copy_nonseg_packet;
> };
>
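On the tailroom nit above: immediately after rte_pktmbuf_alloc() the mbuf
has data_len == 0, so rte_pktmbuf_tailroom(out_pkt) equals buf_len minus
the headroom and the two checks are equivalent; the helper just states the
intent directly. The suggested form would read:

	if (unlikely(rte_pktmbuf_tailroom(out_pkt) < frag_size)) {
		rte_pktmbuf_free(out_pkt);
		__free_fragments(pkts_out, out_pkt_pos);
		return -EINVAL;
	}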
> At 2022-07-25 23:42:06, "Stephen Hemminger" <stephen@networkplumber.org> wrote:
>>On Sun, 24 Jul 2022 16:10:03 +0800
>>Huichao Cai <chcchc88@163.com> wrote:
>>
>>> +
>>> + /*
>>> + * Formal parameter checking.
>>> + */
>>> + if (unlikely(pkt_in == NULL) || unlikely(pkts_out == NULL) ||
>>> + unlikely(nb_pkts_out == 0) || unlikely(pool_direct == NULL) ||
>>> + unlikely(mtu_size < RTE_ETHER_MIN_MTU))
>>> + return -EINVAL;
>>> +
>>> + in_hdr = rte_pktmbuf_mtod(pkt_in, struct rte_ipv4_hdr *);
>>> + header_len = (in_hdr->version_ihl & RTE_IPV4_HDR_IHL_MASK) *
>>> + RTE_IPV4_IHL_MULTIPLIER;
>>> +
>>> + /* Check IP header length */
>>> + if (unlikely(pkt_in->data_len < header_len) ||
>>> + unlikely(mtu_size < header_len))
>>> + return -EINVAL;
>>> +
>>
>>My suspicion is that all this input parameter checking probably costs more
>>than any performance gain from having a non-segmented fast path.
I think the checks are not that expensive.
My guess is that the actual copying will be the main cycle-eater here.
Though if the percentage of packets that need to be fragmented is tiny,
it might still be worth it.
Though yes, I still think it would be better not to use MBUF_FAST_FREE at
all, but we are where we are.
> These checks are consistent with the rte_ipv4_fragment_packet function.
> I think these have been tested for performance. If these checks do affect
> performance, perhaps the validity of the arguments is better guaranteed
> by the caller.
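Putting the trade-off Konstantin describes into code: an application that
knows which TX offloads it enabled can choose the fragmentation strategy
once at setup time. A hypothetical sketch (app_conf stands for the
rte_eth_conf the application itself passed to rte_eth_dev_configure()):

#include <rte_ethdev.h>
#include <rte_ip_frag.h>

/* Sketch: pick the fragmentation API from the TX offloads actually
 * enabled on the port. */
static int32_t
fragment_for_port(const struct rte_eth_conf *app_conf, struct rte_mbuf *pkt,
		struct rte_mbuf **frags, uint16_t nb_frags, uint16_t mtu,
		struct rte_mempool *direct_pool, struct rte_mempool *indirect_pool)
{
	if (app_conf->txmode.offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
		/* Copy-based: every fragment is a direct, single-segment
		 * mbuf with refcnt == 1 from one pool, as the offload
		 * requires. */
		return rte_ipv4_fragment_copy_nonseg_packet(pkt, frags,
				nb_frags, mtu, direct_pool);

	/* Otherwise prefer the zero-copy API: a direct header mbuf plus
	 * indirect payload mbufs, no data copy. */
	return rte_ipv4_fragment_packet(pkt, frags, nb_frags, mtu,
			direct_pool, indirect_pool);
}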