[v2,1/7] mbuf: new function to generate raw Tx offload value
Checks
Commit Message
Operations to set/update bit-fields often cause compilers
to generate suboptimal code.
To help avoid such situation for tx_offload fields:
introduce new enum for tx_offload bit-fields lengths and offsets,
and new function to generate raw tx_offload value.
Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
---
lib/librte_mbuf/rte_mbuf.h | 71 ++++++++++++++++++++++++++++++++++----
1 file changed, 64 insertions(+), 7 deletions(-)
Comments
>
> +static inline uint64_t
I like this idea. I prefer to add __rte_always_inline for this
function.
> +rte_mbuf_tx_offload(uint64_t il2, uint64_t il3, uint64_t il4,
> uint64_t tso,
> + uint64_t ol3, uint64_t ol2)
> +{
> + return il2 << RTE_MBUF_L2_LEN_OFS |
> + il3 << RTE_MBUF_L3_LEN_OFS |
> + il4 << RTE_MBUF_L4_LEN_OFS |
> + tso << RTE_MBUF_TSO_SEGSZ_OFS |
> + ol3 << RTE_MBUF_OL3_LEN_OFS |
> + ol2 << RTE_MBUF_OL2_LEN_OFS;
> +}
> +
> /**
> * Validate general requirements for Tx offload in mbuf.
> *
Hi Konstantin,
Wednesday, March 20, 2019 8:47 PM, Konstantin Ananyev:
> Subject: [dpdk-dev] [PATCH v2 1/7] mbuf: new function to generate raw Tx
> offload value
>
> Operations to set/update bit-fields often cause compilers to generate
> suboptimal code.
> To help avoid such situation for tx_offload fields:
> introduce new enum for tx_offload bit-fields lengths and offsets, and new
> function to generate raw tx_offload value.
>
> Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
> ---
> lib/librte_mbuf/rte_mbuf.h | 71
> ++++++++++++++++++++++++++++++++++----
> 1 file changed, 64 insertions(+), 7 deletions(-)
>
> diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h index
> d961ccaf6..b967ad17e 100644
> --- a/lib/librte_mbuf/rte_mbuf.h
> +++ b/lib/librte_mbuf/rte_mbuf.h
> @@ -479,6 +479,26 @@ struct rte_mbuf_sched {
> uint16_t reserved; /**< Reserved. */
> }; /**< Hierarchical scheduler */
>
> +/** enum for the tx_offload bit-fields lenghts and offsets. */ enum {
> + RTE_MBUF_L2_LEN_BITS = 7,
> + RTE_MBUF_L3_LEN_BITS = 9,
> + RTE_MBUF_L4_LEN_BITS = 8,
> + RTE_MBUF_TSO_SEGSZ_BITS = 16,
> + RTE_MBUF_OL3_LEN_BITS = 9,
> + RTE_MBUF_OL2_LEN_BITS = 7,
> + RTE_MBUF_L2_LEN_OFS = 0,
> + RTE_MBUF_L3_LEN_OFS = RTE_MBUF_L2_LEN_OFS +
> RTE_MBUF_L2_LEN_BITS,
> + RTE_MBUF_L4_LEN_OFS = RTE_MBUF_L3_LEN_OFS +
> RTE_MBUF_L3_LEN_BITS,
> + RTE_MBUF_TSO_SEGSZ_OFS = RTE_MBUF_L4_LEN_OFS +
> RTE_MBUF_L4_LEN_BITS,
> + RTE_MBUF_OL3_LEN_OFS = RTE_MBUF_TSO_SEGSZ_OFS +
> RTE_MBUF_TSO_SEGSZ_BITS,
> + RTE_MBUF_OL2_LEN_OFS = RTE_MBUF_OL3_LEN_OFS +
> RTE_MBUF_OL3_LEN_BITS,
> + RTE_MBUF_TXOFLD_UNUSED_OFS =
> + RTE_MBUF_OL2_LEN_OFS + RTE_MBUF_OL2_LEN_BITS,
> + RTE_MBUF_TXOFLD_UNUSED_BITS =
> + sizeof(uint64_t) * CHAR_BIT -
> RTE_MBUF_TXOFLD_UNUSED_OFS, };
> +
> /**
> * The generic rte_mbuf, containing a packet mbuf.
> */
> @@ -640,19 +660,24 @@ struct rte_mbuf {
> uint64_t tx_offload; /**< combined for easy fetch */
> __extension__
> struct {
> - uint64_t l2_len:7;
> + uint64_t l2_len:RTE_MBUF_L2_LEN_BITS;
> /**< L2 (MAC) Header Length for non-tunneling pkt.
> * Outer_L4_len + ... + Inner_L2_len for tunneling
> pkt.
> */
> - uint64_t l3_len:9; /**< L3 (IP) Header Length. */
> - uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length.
> */
> - uint64_t tso_segsz:16; /**< TCP TSO segment size */
> + uint64_t l3_len:RTE_MBUF_L3_LEN_BITS;
> + /**< L3 (IP) Header Length. */
> + uint64_t l4_len:RTE_MBUF_L4_LEN_BITS;
> + /**< L4 (TCP/UDP) Header Length. */
> + uint64_t tso_segsz:RTE_MBUF_TSO_SEGSZ_BITS;
> + /**< TCP TSO segment size */
>
> /* fields for TX offloading of tunnels */
> - uint64_t outer_l3_len:9; /**< Outer L3 (IP) Hdr
> Length. */
> - uint64_t outer_l2_len:7; /**< Outer L2 (MAC) Hdr
> Length. */
> + uint64_t outer_l3_len:RTE_MBUF_OL3_LEN_BITS;
> + /**< Outer L3 (IP) Hdr Length. */
> + uint64_t outer_l2_len:RTE_MBUF_OL2_LEN_BITS;
> + /**< Outer L2 (MAC) Hdr Length. */
>
> - /* uint64_t unused:8; */
> + /* uint64_t
> unused:RTE_MBUF_TXOFLD_UNUSED_BITS; */
I am not sure we want to decide now that this remaining byte is reserved for other Tx offloads.
Maybe it is better to change the rte_mbuf_tx_offload API a bit to also receive the mbuf and to assign the offload values w/ mask.
> };
> };
>
> @@ -2243,6 +2268,38 @@ static inline int rte_pktmbuf_chain(struct
> rte_mbuf *head, struct rte_mbuf *tail
> return 0;
> }
>
> +/*
> + * @warning
> + * @b EXPERIMENTAL: This API may change without prior notice.
> + *
> + * For given input values generate raw tx_offload value.
> + * @param il2
> + * l2_len value.
> + * @param il3
> + * l3_len value.
> + * @param il4
> + * l4_len value.
> + * @param tso
> + * tso_segsz value.
> + * @param ol3
> + * outer_l3_len value.
> + * @param ol2
> + * outer_l2_len value.
> + * @return
> + * raw tx_offload value.
> + */
> +static inline uint64_t
> +rte_mbuf_tx_offload(uint64_t il2, uint64_t il3, uint64_t il4, uint64_t tso,
> + uint64_t ol3, uint64_t ol2)
> +{
> + return il2 << RTE_MBUF_L2_LEN_OFS |
> + il3 << RTE_MBUF_L3_LEN_OFS |
> + il4 << RTE_MBUF_L4_LEN_OFS |
> + tso << RTE_MBUF_TSO_SEGSZ_OFS |
> + ol3 << RTE_MBUF_OL3_LEN_OFS |
> + ol2 << RTE_MBUF_OL2_LEN_OFS;
> +}
> +
> /**
> * Validate general requirements for Tx offload in mbuf.
> *
> --
> 2.17.1
Hi Shahaf,
> Hi Konstantin,
>
> Wednesday, March 20, 2019 8:47 PM, Konstantin Ananyev:
> > Subject: [dpdk-dev] [PATCH v2 1/7] mbuf: new function to generate raw Tx
> > offload value
> >
> > Operations to set/update bit-fields often cause compilers to generate
> > suboptimal code.
> > To help avoid such situation for tx_offload fields:
> > introduce new enum for tx_offload bit-fields lengths and offsets, and new
> > function to generate raw tx_offload value.
> >
> > Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
> > ---
> > lib/librte_mbuf/rte_mbuf.h | 71
> > ++++++++++++++++++++++++++++++++++----
> > 1 file changed, 64 insertions(+), 7 deletions(-)
> >
> > diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h index
> > d961ccaf6..b967ad17e 100644
> > --- a/lib/librte_mbuf/rte_mbuf.h
> > +++ b/lib/librte_mbuf/rte_mbuf.h
> > @@ -479,6 +479,26 @@ struct rte_mbuf_sched {
> > uint16_t reserved; /**< Reserved. */
> > }; /**< Hierarchical scheduler */
> >
> > +/** enum for the tx_offload bit-fields lenghts and offsets. */ enum {
> > + RTE_MBUF_L2_LEN_BITS = 7,
> > + RTE_MBUF_L3_LEN_BITS = 9,
> > + RTE_MBUF_L4_LEN_BITS = 8,
> > + RTE_MBUF_TSO_SEGSZ_BITS = 16,
> > + RTE_MBUF_OL3_LEN_BITS = 9,
> > + RTE_MBUF_OL2_LEN_BITS = 7,
> > + RTE_MBUF_L2_LEN_OFS = 0,
> > + RTE_MBUF_L3_LEN_OFS = RTE_MBUF_L2_LEN_OFS +
> > RTE_MBUF_L2_LEN_BITS,
> > + RTE_MBUF_L4_LEN_OFS = RTE_MBUF_L3_LEN_OFS +
> > RTE_MBUF_L3_LEN_BITS,
> > + RTE_MBUF_TSO_SEGSZ_OFS = RTE_MBUF_L4_LEN_OFS +
> > RTE_MBUF_L4_LEN_BITS,
> > + RTE_MBUF_OL3_LEN_OFS = RTE_MBUF_TSO_SEGSZ_OFS +
> > RTE_MBUF_TSO_SEGSZ_BITS,
> > + RTE_MBUF_OL2_LEN_OFS = RTE_MBUF_OL3_LEN_OFS +
> > RTE_MBUF_OL3_LEN_BITS,
> > + RTE_MBUF_TXOFLD_UNUSED_OFS =
> > + RTE_MBUF_OL2_LEN_OFS + RTE_MBUF_OL2_LEN_BITS,
> > + RTE_MBUF_TXOFLD_UNUSED_BITS =
> > + sizeof(uint64_t) * CHAR_BIT -
> > RTE_MBUF_TXOFLD_UNUSED_OFS, };
> > +
> > /**
> > * The generic rte_mbuf, containing a packet mbuf.
> > */
> > @@ -640,19 +660,24 @@ struct rte_mbuf {
> > uint64_t tx_offload; /**< combined for easy fetch */
> > __extension__
> > struct {
> > - uint64_t l2_len:7;
> > + uint64_t l2_len:RTE_MBUF_L2_LEN_BITS;
> > /**< L2 (MAC) Header Length for non-tunneling pkt.
> > * Outer_L4_len + ... + Inner_L2_len for tunneling
> > pkt.
> > */
> > - uint64_t l3_len:9; /**< L3 (IP) Header Length. */
> > - uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length.
> > */
> > - uint64_t tso_segsz:16; /**< TCP TSO segment size */
> > + uint64_t l3_len:RTE_MBUF_L3_LEN_BITS;
> > + /**< L3 (IP) Header Length. */
> > + uint64_t l4_len:RTE_MBUF_L4_LEN_BITS;
> > + /**< L4 (TCP/UDP) Header Length. */
> > + uint64_t tso_segsz:RTE_MBUF_TSO_SEGSZ_BITS;
> > + /**< TCP TSO segment size */
> >
> > /* fields for TX offloading of tunnels */
> > - uint64_t outer_l3_len:9; /**< Outer L3 (IP) Hdr
> > Length. */
> > - uint64_t outer_l2_len:7; /**< Outer L2 (MAC) Hdr
> > Length. */
> > + uint64_t outer_l3_len:RTE_MBUF_OL3_LEN_BITS;
> > + /**< Outer L3 (IP) Hdr Length. */
> > + uint64_t outer_l2_len:RTE_MBUF_OL2_LEN_BITS;
> > + /**< Outer L2 (MAC) Hdr Length. */
> >
> > - /* uint64_t unused:8; */
> > + /* uint64_t
> > unused:RTE_MBUF_TXOFLD_UNUSED_BITS; */
>
> I am not sure we want to decide now that this remaining byte is for another Tx offloads.
Not sure I understand what your concern is here...
The tx_offload field is 8B long, see 'uint64_t tx_offload' above.
All I did was replace the raw number '8' with the enum value 'RTE_MBUF_TXOFLD_UNUSED_BITS', which also evaluates to 8.
> Maybe it is better to change a bit the rte_mbuf_tx_offload API to receive also the mbuf and to assign the offload values w/ mask.
Could you explain a bit more what you are suggesting here?
Konstantin
>
> > };
> > };
> >
> > @@ -2243,6 +2268,38 @@ static inline int rte_pktmbuf_chain(struct
> > rte_mbuf *head, struct rte_mbuf *tail
> > return 0;
> > }
> >
> > +/*
> > + * @warning
> > + * @b EXPERIMENTAL: This API may change without prior notice.
> > + *
> > + * For given input values generate raw tx_offload value.
> > + * @param il2
> > + * l2_len value.
> > + * @param il3
> > + * l3_len value.
> > + * @param il4
> > + * l4_len value.
> > + * @param tso
> > + * tso_segsz value.
> > + * @param ol3
> > + * outer_l3_len value.
> > + * @param ol2
> > + * outer_l2_len value.
> > + * @return
> > + * raw tx_offload value.
> > + */
> > +static inline uint64_t
> > +rte_mbuf_tx_offload(uint64_t il2, uint64_t il3, uint64_t il4, uint64_t tso,
> > + uint64_t ol3, uint64_t ol2)
> > +{
> > + return il2 << RTE_MBUF_L2_LEN_OFS |
> > + il3 << RTE_MBUF_L3_LEN_OFS |
> > + il4 << RTE_MBUF_L4_LEN_OFS |
> > + tso << RTE_MBUF_TSO_SEGSZ_OFS |
> > + ol3 << RTE_MBUF_OL3_LEN_OFS |
> > + ol2 << RTE_MBUF_OL2_LEN_OFS;
> > +}
> > +
> > /**
> > * Validate general requirements for Tx offload in mbuf.
> > *
> > --
> > 2.17.1
Thursday, March 21, 2019 3:52 PM, Ananyev, Konstantin:
> Subject: RE: [dpdk-dev] [PATCH v2 1/7] mbuf: new function to generate raw
> Tx offload value
>
> Hi Shahaf,
>
> > Hi Konstantin,
> >
> > Wednesday, March 20, 2019 8:47 PM, Konstantin Ananyev:
> > > /**
> > > * The generic rte_mbuf, containing a packet mbuf.
> > > */
> > > @@ -640,19 +660,24 @@ struct rte_mbuf {
> > > uint64_t tx_offload; /**< combined for easy fetch */
> > > __extension__
> > > struct {
> > > - uint64_t l2_len:7;
> > > + uint64_t l2_len:RTE_MBUF_L2_LEN_BITS;
> > > /**< L2 (MAC) Header Length for non-tunneling pkt.
> > > * Outer_L4_len + ... + Inner_L2_len for tunneling
> pkt.
> > > */
> > > - uint64_t l3_len:9; /**< L3 (IP) Header Length. */
> > > - uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length.
> > > */
> > > - uint64_t tso_segsz:16; /**< TCP TSO segment size */
> > > + uint64_t l3_len:RTE_MBUF_L3_LEN_BITS;
> > > + /**< L3 (IP) Header Length. */
> > > + uint64_t l4_len:RTE_MBUF_L4_LEN_BITS;
> > > + /**< L4 (TCP/UDP) Header Length. */
> > > + uint64_t tso_segsz:RTE_MBUF_TSO_SEGSZ_BITS;
> > > + /**< TCP TSO segment size */
> > >
> > > /* fields for TX offloading of tunnels */
> > > - uint64_t outer_l3_len:9; /**< Outer L3 (IP) Hdr
> > > Length. */
> > > - uint64_t outer_l2_len:7; /**< Outer L2 (MAC) Hdr
> > > Length. */
> > > + uint64_t outer_l3_len:RTE_MBUF_OL3_LEN_BITS;
> > > + /**< Outer L3 (IP) Hdr Length. */
> > > + uint64_t outer_l2_len:RTE_MBUF_OL2_LEN_BITS;
> > > + /**< Outer L2 (MAC) Hdr Length. */
> > >
> > > - /* uint64_t unused:8; */
> > > + /* uint64_t
> > > unused:RTE_MBUF_TXOFLD_UNUSED_BITS; */
> >
> > I am not sure we want to decide now that this remaining byte is for another
> Tx offloads.
>
> Not sure, I understand what is your concern here...
> tx_offload field is 8B long, see 'uint64_t tx_offload' above.
> All I did replaced raw number '8' with enum value '
> RTE_MBUF_TXOFLD_UNUSED_BITS' which also evalueate to 8.
Sorry, my bad, missed this union w/ the tx_offload. No further issues 😊.
@@ -479,6 +479,26 @@ struct rte_mbuf_sched {
uint16_t reserved; /**< Reserved. */
}; /**< Hierarchical scheduler */
+/** enum for the tx_offload bit-fields lengths and offsets. */
+enum {
+ RTE_MBUF_L2_LEN_BITS = 7,
+ RTE_MBUF_L3_LEN_BITS = 9,
+ RTE_MBUF_L4_LEN_BITS = 8,
+ RTE_MBUF_TSO_SEGSZ_BITS = 16,
+ RTE_MBUF_OL3_LEN_BITS = 9,
+ RTE_MBUF_OL2_LEN_BITS = 7,
+ RTE_MBUF_L2_LEN_OFS = 0,
+ RTE_MBUF_L3_LEN_OFS = RTE_MBUF_L2_LEN_OFS + RTE_MBUF_L2_LEN_BITS,
+ RTE_MBUF_L4_LEN_OFS = RTE_MBUF_L3_LEN_OFS + RTE_MBUF_L3_LEN_BITS,
+ RTE_MBUF_TSO_SEGSZ_OFS = RTE_MBUF_L4_LEN_OFS + RTE_MBUF_L4_LEN_BITS,
+ RTE_MBUF_OL3_LEN_OFS = RTE_MBUF_TSO_SEGSZ_OFS + RTE_MBUF_TSO_SEGSZ_BITS,
+ RTE_MBUF_OL2_LEN_OFS = RTE_MBUF_OL3_LEN_OFS + RTE_MBUF_OL3_LEN_BITS,
+ RTE_MBUF_TXOFLD_UNUSED_OFS =
+ RTE_MBUF_OL2_LEN_OFS + RTE_MBUF_OL2_LEN_BITS,
+ RTE_MBUF_TXOFLD_UNUSED_BITS =
+ sizeof(uint64_t) * CHAR_BIT - RTE_MBUF_TXOFLD_UNUSED_OFS,
+};
+
/**
* The generic rte_mbuf, containing a packet mbuf.
*/
@@ -640,19 +660,24 @@ struct rte_mbuf {
uint64_t tx_offload; /**< combined for easy fetch */
__extension__
struct {
- uint64_t l2_len:7;
+ uint64_t l2_len:RTE_MBUF_L2_LEN_BITS;
/**< L2 (MAC) Header Length for non-tunneling pkt.
* Outer_L4_len + ... + Inner_L2_len for tunneling pkt.
*/
- uint64_t l3_len:9; /**< L3 (IP) Header Length. */
- uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */
- uint64_t tso_segsz:16; /**< TCP TSO segment size */
+ uint64_t l3_len:RTE_MBUF_L3_LEN_BITS;
+ /**< L3 (IP) Header Length. */
+ uint64_t l4_len:RTE_MBUF_L4_LEN_BITS;
+ /**< L4 (TCP/UDP) Header Length. */
+ uint64_t tso_segsz:RTE_MBUF_TSO_SEGSZ_BITS;
+ /**< TCP TSO segment size */
/* fields for TX offloading of tunnels */
- uint64_t outer_l3_len:9; /**< Outer L3 (IP) Hdr Length. */
- uint64_t outer_l2_len:7; /**< Outer L2 (MAC) Hdr Length. */
+ uint64_t outer_l3_len:RTE_MBUF_OL3_LEN_BITS;
+ /**< Outer L3 (IP) Hdr Length. */
+ uint64_t outer_l2_len:RTE_MBUF_OL2_LEN_BITS;
+ /**< Outer L2 (MAC) Hdr Length. */
- /* uint64_t unused:8; */
+ /* uint64_t unused:RTE_MBUF_TXOFLD_UNUSED_BITS; */
};
};
@@ -2243,6 +2268,38 @@ static inline int rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail
return 0;
}
+/**
+ * @warning
+ * @b EXPERIMENTAL: This API may change without prior notice.
+ *
+ * For given input values generate raw tx_offload value.
+ * @param il2
+ * l2_len value.
+ * @param il3
+ * l3_len value.
+ * @param il4
+ * l4_len value.
+ * @param tso
+ * tso_segsz value.
+ * @param ol3
+ * outer_l3_len value.
+ * @param ol2
+ * outer_l2_len value.
+ * @return
+ * raw tx_offload value.
+ */
+static inline uint64_t
+rte_mbuf_tx_offload(uint64_t il2, uint64_t il3, uint64_t il4, uint64_t tso,
+ uint64_t ol3, uint64_t ol2)
+{
+ return il2 << RTE_MBUF_L2_LEN_OFS |
+ il3 << RTE_MBUF_L3_LEN_OFS |
+ il4 << RTE_MBUF_L4_LEN_OFS |
+ tso << RTE_MBUF_TSO_SEGSZ_OFS |
+ ol3 << RTE_MBUF_OL3_LEN_OFS |
+ ol2 << RTE_MBUF_OL2_LEN_OFS;
+}
+
/**
* Validate general requirements for Tx offload in mbuf.
*