From: Konstantin Ananyev <konstantin.ananyev@intel.com>
To: dev@dpdk.org
Cc: akhil.goyal@nxp.com, Konstantin Ananyev <konstantin.ananyev@intel.com>
Date: Fri, 31 May 2019 01:00:06 +0100
Message-Id: <20190531000006.13918-1-konstantin.ananyev@intel.com>
In-Reply-To: <20190527182328.12143-1-konstantin.ananyev@intel.com>
References: <20190527182328.12143-1-konstantin.ananyev@intel.com>
Subject: [dpdk-dev] [PATCH v2] ipsec: support multi-segment packets

Add support for packets that consist of multiple segments.
Take into account that trailer bytes (padding, ESP tail, ICV)
can span across multiple segments.

Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Acked-by: Akhil Goyal <akhil.goyal@nxp.com>
---
v1 -> v2:
 - merge with latest mainline
 - fix build problem for RTE_BUILD_SHARED_LIB=y
 - update programmer's guide

 doc/guides/prog_guide/ipsec_lib.rst |   1 -
 lib/librte_ipsec/Makefile           |   3 +-
 lib/librte_ipsec/esp_inb.c          | 148 +++++++++++++++++++++-------
 lib/librte_ipsec/misc.h             |  61 ++++++++++++
 4 files changed, 175 insertions(+), 38 deletions(-)

diff --git a/doc/guides/prog_guide/ipsec_lib.rst b/doc/guides/prog_guide/ipsec_lib.rst
index 6fc08886f..63b75b652 100644
--- a/doc/guides/prog_guide/ipsec_lib.rst
+++ b/doc/guides/prog_guide/ipsec_lib.rst
@@ -162,7 +162,6 @@ Limitations
 The following features are not properly supported in the current version:

 * ESP transport mode for IPv6 packets with extension headers.
-* Multi-segment packets.
 * Updates of the fields in inner IP header for tunnel mode
   (as described in RFC 4301, section 5.1.2).
 * Hard/soft limit for SA lifetime (time interval/byte count).
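A note for readers new to the library: the trailer bytes named in the commit
message are, in order, the ESP padding, the two-byte ESP tail (pad length and
next protocol) and the ICV, per RFC 4303. Below is a minimal standalone sketch
of the offset arithmetic inbound processing has to do; the struct and every
size in it are illustrative assumptions, not names or values from this patch.

#include <stdint.h>
#include <stdio.h>

/* local model of the two-byte ESP tail (RFC 4303, section 2) */
struct esp_tail_model {
        uint8_t pad_len;    /* number of padding bytes before the tail */
        uint8_t next_proto; /* protocol of the protected payload */
};

int
main(void)
{
        /* assumed example: 1514B packet, 16B ICV, 2 pad bytes */
        uint32_t pkt_len = 1514, icv_len = 16;
        struct esp_tail_model espt = { .pad_len = 2, .next_proto = 4 };

        /* offsets counted from the start of the packet */
        uint32_t icv_ofs = pkt_len - icv_len;
        uint32_t tail_ofs = icv_ofs - sizeof(espt);
        uint32_t pad_ofs = tail_ofs - espt.pad_len;

        /* total trailer length that inbound processing must strip */
        uint32_t tlen = espt.pad_len + sizeof(espt) + icv_len;

        printf("pad@%u tail@%u icv@%u, trailer=%uB\n",
                pad_ofs, tail_ofs, icv_ofs, tlen);
        return 0;
}

None of those offsets is guaranteed to fall inside a single mbuf segment,
which is exactly the situation the rest of the patch deals with.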
diff --git a/lib/librte_ipsec/Makefile b/lib/librte_ipsec/Makefile
index e80926baa..22f29d98a 100644
--- a/lib/librte_ipsec/Makefile
+++ b/lib/librte_ipsec/Makefile
@@ -9,7 +9,8 @@ LIB = librte_ipsec.a

 CFLAGS += -O3
 CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR)
 CFLAGS += -DALLOW_EXPERIMENTAL_API
-LDLIBS += -lrte_eal -lrte_mbuf -lrte_net -lrte_cryptodev -lrte_security
+LDLIBS += -lrte_eal -lrte_mempool -lrte_mbuf -lrte_net
+LDLIBS += -lrte_cryptodev -lrte_security

 EXPORT_MAP := rte_ipsec_version.map

diff --git a/lib/librte_ipsec/esp_inb.c b/lib/librte_ipsec/esp_inb.c
index 3e12ca103..819d2bf25 100644
--- a/lib/librte_ipsec/esp_inb.c
+++ b/lib/librte_ipsec/esp_inb.c
@@ -104,6 +104,34 @@ inb_cop_prepare(struct rte_crypto_op *cop,
        }
 }

+/*
+ * Helper function for prepare() to deal with situation when
+ * ICV is spread across two segments. Tries to move ICV completely into the
+ * last segment.
+ */
+static struct rte_mbuf *
+move_icv(struct rte_mbuf *ml, uint32_t ofs)
+{
+       uint32_t n;
+       struct rte_mbuf *ms;
+       const void *prev;
+       void *new;
+
+       ms = ml->next;
+       n = ml->data_len - ofs;
+
+       prev = rte_pktmbuf_mtod_offset(ml, const void *, ofs);
+       new = rte_pktmbuf_prepend(ms, n);
+       if (new == NULL)
+               return NULL;
+
+       /* move n ICV bytes from ml into ms */
+       rte_memcpy(new, prev, n);
+       ml->data_len -= n;
+
+       return ms;
+}
+
 /*
  * for pure cryptodev (lookaside none) depending on SA settings,
  * we might have to write some extra data to the packet.
@@ -137,7 +165,7 @@ inb_pkt_prepare(const struct rte_ipsec_sa *sa, const struct replay_sqn *rsn,
 {
        int32_t rc;
        uint64_t sqn;
-       uint32_t clen, icv_ofs, plen;
+       uint32_t clen, icv_len, icv_ofs, plen;
        struct rte_mbuf *ml;
        struct rte_esp_hdr *esph;

@@ -161,14 +189,33 @@ inb_pkt_prepare(const struct rte_ipsec_sa *sa, const struct replay_sqn *rsn,
        plen = mb->pkt_len;
        plen = plen - hlen;

-       ml = rte_pktmbuf_lastseg(mb);
-       icv_ofs = ml->data_len - sa->icv_len + sa->sqh_len;
-
        /* check that packet has a valid length */
        clen = plen - sa->ctp.cipher.length;
        if ((int32_t)clen < 0 || (clen & (sa->pad_align - 1)) != 0)
                return -EBADMSG;

+       /* find ICV location */
+       icv_len = sa->icv_len;
+       icv_ofs = mb->pkt_len - icv_len;
+
+       ml = mbuf_get_seg_ofs(mb, &icv_ofs);
+
+       /*
+        * if ICV is spread across two segments, then try to
+        * move ICV completely into the last segment.
+        */
+       if (ml->data_len < icv_ofs + icv_len) {
+
+               ml = move_icv(ml, icv_ofs);
+               if (ml == NULL)
+                       return -ENOSPC;
+
+               /* new ICV location */
+               icv_ofs = 0;
+       }
+
+       icv_ofs += sa->sqh_len;
+
        /* we have to allocate space for AAD somewhere,
         * right now - just use free trailing space at the last segment.
         * Would probably be more convenient to reserve space for AAD
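The case the new code above handles: the ICV may start in one segment and end
in the next, while a lookaside crypto device wants it contiguous. move_icv()
resolves this by prepending the leading ICV bytes to the following segment,
which is also why it can fail with -ENOSPC when that segment lacks headroom
for rte_pktmbuf_prepend(). A self-contained toy model of the straddle test,
with a plain struct standing in for rte_mbuf and made-up sizes:

#include <stdint.h>
#include <stdio.h>

struct seg {
        uint32_t data_len;
        struct seg *next;
};

int
main(void)
{
        /* assumed example: 68B packet split 60B + 8B, 16B ICV */
        struct seg s1 = { 8, NULL }, s0 = { 60, &s1 };
        struct seg *ml = &s0;
        uint32_t pkt_len = 68, icv_len = 16;
        uint32_t icv_ofs = pkt_len - icv_len;   /* 52 */

        /* what mbuf_get_seg_ofs() does: find the segment holding the ICV */
        while (icv_ofs >= ml->data_len) {
                icv_ofs -= ml->data_len;
                ml = ml->next;
        }

        /* the straddle test from inb_pkt_prepare() above */
        if (ml->data_len < icv_ofs + icv_len)
                printf("ICV straddles segments: move %u byte(s) forward\n",
                        ml->data_len - icv_ofs);
        else
                printf("ICV contiguous at segment offset %u\n", icv_ofs);
        return 0;
}

The printed byte count equals the n that move_icv() copies
(ml->data_len - ofs).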
@@ -239,36 +286,65 @@ esp_inb_pkt_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
  */
 static inline void
 process_step1(struct rte_mbuf *mb, uint32_t tlen, struct rte_mbuf **ml,
-       struct esp_tail *espt, uint32_t *hlen)
+       struct esp_tail *espt, uint32_t *hlen, uint32_t *tofs)
 {
        const struct esp_tail *pt;
+       uint32_t ofs;

-       ml[0] = rte_pktmbuf_lastseg(mb);
+       ofs = mb->pkt_len - tlen;
        hlen[0] = mb->l2_len + mb->l3_len;
-       pt = rte_pktmbuf_mtod_offset(ml[0], const struct esp_tail *,
-               ml[0]->data_len - tlen);
+       ml[0] = mbuf_get_seg_ofs(mb, &ofs);
+       pt = rte_pktmbuf_mtod_offset(ml[0], const struct esp_tail *, ofs);
+       tofs[0] = ofs;
        espt[0] = pt[0];
 }

+/*
+ * Helper function to check pad bytes values.
+ * Note that pad bytes can be spread across multiple segments.
+ */
+static inline int
+check_pad_bytes(struct rte_mbuf *mb, uint32_t ofs, uint32_t len)
+{
+       const uint8_t *pd;
+       uint32_t k, n;
+
+       for (n = 0; n != len; n += k, mb = mb->next) {
+               k = mb->data_len - ofs;
+               k = RTE_MIN(k, len - n);
+               pd = rte_pktmbuf_mtod_offset(mb, const uint8_t *, ofs);
+               if (memcmp(pd, esp_pad_bytes + n, k) != 0)
+                       break;
+               ofs = 0;
+       }
+
+       return len - n;
+}
+
 /*
  * packet checks for transport mode:
  * - no reported IPsec related failures in ol_flags
- * - tail length is valid
+ * - tail and header lengths are valid
  * - padding bytes are valid
+ * apart from checks, function also updates tail offset (and segment)
+ * by taking into account pad length.
  */
 static inline int32_t
-trs_process_check(const struct rte_mbuf *mb, const struct rte_mbuf *ml,
-       struct esp_tail espt, uint32_t hlen, uint32_t tlen)
+trs_process_check(struct rte_mbuf *mb, struct rte_mbuf **ml,
+       uint32_t *tofs, struct esp_tail espt, uint32_t hlen, uint32_t tlen)
 {
-       const uint8_t *pd;
-       int32_t ofs;
+       if ((mb->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED) != 0 ||
+                       tlen + hlen > mb->pkt_len)
+               return -EBADMSG;

-       ofs = ml->data_len - tlen;
-       pd = rte_pktmbuf_mtod_offset(ml, const uint8_t *, ofs);
+       /* padding bytes are spread over multiple segments */
+       if (tofs[0] < espt.pad_len) {
+               tofs[0] = mb->pkt_len - tlen;
+               ml[0] = mbuf_get_seg_ofs(mb, tofs);
+       } else
+               tofs[0] -= espt.pad_len;

-       return ((mb->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED) != 0 ||
-               ofs < 0 || tlen + hlen > mb->pkt_len ||
-               (espt.pad_len != 0 && memcmp(pd, esp_pad_bytes, espt.pad_len)));
+       return check_pad_bytes(ml[0], tofs[0], espt.pad_len);
 }

 /*
@@ -277,10 +353,11 @@ trs_process_check(const struct rte_mbuf *mb, const struct rte_mbuf *ml,
  * - esp tail next proto contains expected for that SA value
  */
 static inline int32_t
-tun_process_check(const struct rte_mbuf *mb, struct rte_mbuf *ml,
-       struct esp_tail espt, uint32_t hlen, const uint32_t tlen, uint8_t proto)
+tun_process_check(struct rte_mbuf *mb, struct rte_mbuf **ml,
+       uint32_t *tofs, struct esp_tail espt, uint32_t hlen, uint32_t tlen,
+       uint8_t proto)
 {
-       return (trs_process_check(mb, ml, espt, hlen, tlen) ||
+       return (trs_process_check(mb, ml, tofs, espt, hlen, tlen) ||
                espt.next_proto != proto);
 }

@@ -293,7 +370,7 @@ tun_process_check(const struct rte_mbuf *mb, struct rte_mbuf *ml,
  */
 static inline void *
 tun_process_step2(struct rte_mbuf *mb, struct rte_mbuf *ml, uint32_t hlen,
-       uint32_t adj, uint32_t tlen, uint32_t *sqn)
+       uint32_t adj, uint32_t tofs, uint32_t tlen, uint32_t *sqn)
 {
        const struct rte_esp_hdr *ph;

@@ -302,8 +379,7 @@ tun_process_step2(struct rte_mbuf *mb, struct rte_mbuf *ml, uint32_t hlen,
        sqn[0] = ph->seq;

        /* cut of ICV, ESP tail and padding bytes */
-       ml->data_len -= tlen;
-       mb->pkt_len -= tlen;
+       mbuf_cut_seg_ofs(mb, ml, tofs, tlen);

        /* cut of L2/L3 headers, ESP header and IV */
        return rte_pktmbuf_adj(mb, adj);
@@ -318,7 +394,7 @@ tun_process_step2(struct rte_mbuf *mb, struct rte_mbuf *ml, uint32_t hlen,
  */
 static inline void *
 trs_process_step2(struct rte_mbuf *mb, struct rte_mbuf *ml, uint32_t hlen,
-       uint32_t adj, uint32_t tlen, uint32_t *sqn)
+       uint32_t adj, uint32_t tofs, uint32_t tlen, uint32_t *sqn)
 {
        char *np, *op;

@@ -326,7 +402,7 @@ trs_process_step2(struct rte_mbuf *mb, struct rte_mbuf *ml, uint32_t hlen,
        op = rte_pktmbuf_mtod(mb, char *);

        /* cut off ESP header and IV */
-       np = tun_process_step2(mb, ml, hlen, adj, tlen, sqn);
+       np = tun_process_step2(mb, ml, hlen, adj, tofs, tlen, sqn);

        /* move header bytes to fill the gap after ESP header removal */
        remove_esph(np, op, hlen);
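Background for check_pad_bytes() above: the default ESP self-describing
padding is the monotonic byte sequence 1, 2, 3, ... (RFC 4303, section 2.4),
which the library keeps precomputed in the esp_pad_bytes table, so each
segment's slice of the padding can be compared against the matching slice of
that reference. A standalone sketch of the same loop over a toy segment
chain; every name and data value here is illustrative:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* RFC 4303 default self-describing padding: 1, 2, 3, ... */
static const uint8_t pad_ref[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };

struct seg {
        const uint8_t *data;
        uint32_t data_len;
        const struct seg *next;
};

/* returns 0 when all len pad bytes starting at (s, ofs) match pad_ref */
static uint32_t
check_pad(const struct seg *s, uint32_t ofs, uint32_t len)
{
        uint32_t k, n;

        for (n = 0; n != len; n += k, s = s->next) {
                k = s->data_len - ofs;
                if (k > len - n)
                        k = len - n;
                if (memcmp(s->data + ofs, pad_ref + n, k) != 0)
                        break;
                ofs = 0;        /* later segments are read from the start */
        }
        return len - n;
}

int
main(void)
{
        /* 4 pad bytes {1,2,3,4} split 2/2 across two segments */
        static const uint8_t d0[] = { 0xaa, 0xbb, 1, 2 };
        static const uint8_t d1[] = { 3, 4, 0xcc };
        const struct seg s1 = { d1, sizeof(d1), NULL };
        const struct seg s0 = { d0, sizeof(d0), &s1 };

        printf("mismatched pad bytes: %u\n", check_pad(&s0, 2, 4));
        return 0;
}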
@@ -376,7 +452,7 @@ tun_process(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
        uint32_t sqn[], uint32_t dr[], uint16_t num)
 {
        uint32_t adj, i, k, tl;
-       uint32_t hl[num];
+       uint32_t hl[num], to[num];
        struct esp_tail espt[num];
        struct rte_mbuf *ml[num];

@@ -388,7 +464,7 @@ tun_process(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
         * read mbufs metadata and esp tail first.
         */
        for (i = 0; i != num; i++)
-               process_step1(mb[i], tlen, &ml[i], &espt[i], &hl[i]);
+               process_step1(mb[i], tlen, &ml[i], &espt[i], &hl[i], &to[i]);

        k = 0;
        for (i = 0; i != num; i++) {
@@ -397,11 +473,11 @@ tun_process(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
                tl = tlen + espt[i].pad_len;

                /* check that packet is valid */
-               if (tun_process_check(mb[i], ml[i], espt[i], adj, tl,
+               if (tun_process_check(mb[i], &ml[i], &to[i], espt[i], adj, tl,
                                sa->proto) == 0) {

                        /* modify packet's layout */
-                       tun_process_step2(mb[i], ml[i], hl[i], adj,
+                       tun_process_step2(mb[i], ml[i], hl[i], adj, to[i],
                                tl, sqn + k);
                        /* update mbuf's metadata */
                        tun_process_step3(mb[i], sa->tx_offload.msk,
@@ -424,7 +500,7 @@ trs_process(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
 {
        char *np;
        uint32_t i, k, l2, tl;
-       uint32_t hl[num];
+       uint32_t hl[num], to[num];
        struct esp_tail espt[num];
        struct rte_mbuf *ml[num];

@@ -436,7 +512,7 @@ trs_process(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
         * read mbufs metadata and esp tail first.
         */
        for (i = 0; i != num; i++)
-               process_step1(mb[i], tlen, &ml[i], &espt[i], &hl[i]);
+               process_step1(mb[i], tlen, &ml[i], &espt[i], &hl[i], &to[i]);

        k = 0;
        for (i = 0; i != num; i++) {
@@ -445,12 +521,12 @@ trs_process(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
                l2 = mb[i]->l2_len;

                /* check that packet is valid */
-               if (trs_process_check(mb[i], ml[i], espt[i], hl[i] + cofs,
-                               tl) == 0) {
+               if (trs_process_check(mb[i], &ml[i], &to[i], espt[i],
+                               hl[i] + cofs, tl) == 0) {

                        /* modify packet's layout */
-                       np = trs_process_step2(mb[i], ml[i], hl[i], cofs, tl,
-                               sqn + k);
+                       np = trs_process_step2(mb[i], ml[i], hl[i], cofs,
+                               to[i], tl, sqn + k);
                        update_trs_l3hdr(sa, np + l2, mb[i]->pkt_len,
                                l2, hl[i] - l2, espt[i].next_proto);

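Both batch functions keep this file's established shape: a first pass gathers
per-packet metadata (process_step1()), then one pass over the batch validates
each packet, rewriting the good ones in place while recording the indices of
bad ones in dr[] so move_bad_mbufs() can compact the array afterwards. A toy
sketch of that gather/compact pattern, with ints standing in for mbufs and an
arbitrary pass/fail rule:

#include <stdint.h>
#include <stdio.h>

#define NUM 6

/* stand-in for the per-packet checks (here: even payloads "pass") */
static int
check(int pkt)
{
        return (pkt % 2 == 0) ? 0 : -1;
}

int
main(void)
{
        int mb[NUM] = { 10, 11, 12, 13, 14, 15 };
        uint32_t dr[NUM];       /* indices of failed packets */
        uint32_t i, k;

        /* one pass: k counts good packets, dr[] collects bad indices */
        for (i = 0, k = 0; i != NUM; i++) {
                if (check(mb[i]) == 0)
                        k++;            /* good: layout would be rewritten */
                else
                        dr[i - k] = i;  /* bad: queue for move_bad_mbufs() */
        }

        printf("%u good, %u bad (first bad index %u)\n", k, NUM - k, dr[0]);
        return 0;
}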
diff --git a/lib/librte_ipsec/misc.h b/lib/librte_ipsec/misc.h
index 693a4afdd..b0cafef4e 100644
--- a/lib/librte_ipsec/misc.h
+++ b/lib/librte_ipsec/misc.h
@@ -38,4 +38,65 @@ move_bad_mbufs(struct rte_mbuf *mb[], const uint32_t bad_idx[], uint32_t nb_mb,
                mb[k + i] = drb[i];
 }

+/*
+ * Find packet's segment for the specified offset.
+ * ofs - at input should contain required offset, at output would contain
+ * offset value within the segment.
+ */
+static inline struct rte_mbuf *
+mbuf_get_seg_ofs(struct rte_mbuf *mb, uint32_t *ofs)
+{
+       uint32_t k, n;
+       struct rte_mbuf *ms;
+
+       ms = mb;
+       n = *ofs;
+
+       for (k = rte_pktmbuf_data_len(ms); n >= k;
+                       k = rte_pktmbuf_data_len(ms)) {
+               ms = ms->next;
+               n -= k;
+       }
+
+       *ofs = n;
+       return ms;
+}
+
+/*
+ * Trim multi-segment packet at the specified offset, and free
+ * all unused segments.
+ * mb - input packet
+ * ms - segment where to cut
+ * ofs - offset within the *ms*
+ * len - length to cut (from given offset to the end of the packet)
+ * Can be used in conjunction with mbuf_get_seg_ofs():
+ * ofs = new_len;
+ * ms = mbuf_get_seg_ofs(mb, &ofs);
+ * mbuf_cut_seg_ofs(mb, ms, ofs, mb->pkt_len - new_len);
+ */
+static inline void
+mbuf_cut_seg_ofs(struct rte_mbuf *mb, struct rte_mbuf *ms, uint32_t ofs,
+       uint32_t len)
+{
+       uint32_t n, slen;
+       struct rte_mbuf *mn;
+
+       slen = ms->data_len;
+       ms->data_len = ofs;
+
+       /* tail spans across multiple segments */
+       if (slen < ofs + len) {
+               mn = ms->next;
+               ms->next = NULL;
+               for (n = 0; mn != NULL; n++) {
+                       ms = mn->next;
+                       rte_pktmbuf_free_seg(mn);
+                       mn = ms;
+               }
+               mb->nb_segs -= n;
+       }
+
+       mb->pkt_len -= len;
+}
+
 #endif /* _MISC_H_ */
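The header comment above already spells out the intended pairing of the two
helpers; here it is exercised end to end on a toy three-segment chain. The
structs and the freed-segment counter are stand-ins for rte_mbuf and
rte_pktmbuf_free_seg(), and the sketch models only the branch where the cut
tail spans trailing segments:

#include <stdint.h>
#include <stdio.h>

struct seg {
        uint32_t data_len;
        struct seg *next;
};

/* walk to the segment containing *ofs; *ofs becomes segment-relative */
static struct seg *
get_seg_ofs(struct seg *s, uint32_t *ofs)
{
        while (*ofs >= s->data_len) {
                *ofs -= s->data_len;
                s = s->next;
        }
        return s;
}

/* trim the chain at (ms, ofs); returns how many segments were "freed" */
static uint32_t
cut_seg_ofs(struct seg *ms, uint32_t ofs)
{
        struct seg *mn = ms->next;
        uint32_t n = 0;

        ms->data_len = ofs;
        ms->next = NULL;
        while (mn != NULL) {    /* real code: rte_pktmbuf_free_seg() */
                mn = mn->next;
                n++;
        }
        return n;               /* real code: mb->nb_segs -= n */
}

int
main(void)
{
        struct seg s2 = { 30, NULL }, s1 = { 40, &s2 }, s0 = { 50, &s1 };
        uint32_t pkt_len = 120, new_len = 70;

        /* the recipe from the comment: find the cut point, then cut */
        uint32_t ofs = new_len;
        struct seg *ms = get_seg_ofs(&s0, &ofs);        /* s1, ofs = 20 */
        uint32_t freed = cut_seg_ofs(ms, ofs);

        pkt_len -= pkt_len - new_len;   /* mb->pkt_len -= len */
        printf("pkt_len now %u, freed %u segment(s)\n", pkt_len, freed);
        return 0;
}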