@@ -17,6 +17,55 @@
(a).ipv6[2] == (b).ipv6[2] && \
(a).ipv6[3] == (b).ipv6[3])
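+/* Clear the hardware IPsec tables and the driver's shadow copies. */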
+static void
+ngbe_crypto_clear_ipsec_tables(struct rte_eth_dev *dev)
+{
+ struct ngbe_hw *hw = ngbe_dev_hw(dev);
+ struct ngbe_ipsec *priv = NGBE_DEV_IPSEC(dev);
+ int i = 0;
+
+ /* clear Rx IP table */
+ for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
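+ /* the entry index is carried in the bits above the WRITE/table-select flags */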
+ uint16_t index = i << 3;
+ uint32_t reg_val = NGBE_IPSRXIDX_WRITE |
+ NGBE_IPSRXIDX_TB_IP | index;
+ wr32(hw, NGBE_IPSRXADDR(0), 0);
+ wr32(hw, NGBE_IPSRXADDR(1), 0);
+ wr32(hw, NGBE_IPSRXADDR(2), 0);
+ wr32(hw, NGBE_IPSRXADDR(3), 0);
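+ /* commit the zeroed address to table entry i and wait for the write to complete */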
+ wr32w(hw, NGBE_IPSRXIDX, reg_val, NGBE_IPSRXIDX_WRITE, 1000);
+ }
+
+ /* clear Rx SPI and Rx/Tx SA tables */
+ for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
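+ /* each SA index maps to a SPI/IP-pair entry, an Rx key entry and a Tx key entry */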
+ uint32_t index = i << 3;
+ uint32_t reg_val = NGBE_IPSRXIDX_WRITE |
+ NGBE_IPSRXIDX_TB_SPI | index;
+ wr32(hw, NGBE_IPSRXSPI, 0);
+ wr32(hw, NGBE_IPSRXADDRIDX, 0);
+ wr32w(hw, NGBE_IPSRXIDX, reg_val, NGBE_IPSRXIDX_WRITE, 1000);
+ reg_val = NGBE_IPSRXIDX_WRITE | NGBE_IPSRXIDX_TB_KEY | index;
+ wr32(hw, NGBE_IPSRXKEY(0), 0);
+ wr32(hw, NGBE_IPSRXKEY(1), 0);
+ wr32(hw, NGBE_IPSRXKEY(2), 0);
+ wr32(hw, NGBE_IPSRXKEY(3), 0);
+ wr32(hw, NGBE_IPSRXSALT, 0);
+ wr32(hw, NGBE_IPSRXMODE, 0);
+ wr32w(hw, NGBE_IPSRXIDX, reg_val, NGBE_IPSRXIDX_WRITE, 1000);
+ reg_val = NGBE_IPSTXIDX_WRITE | index;
+ wr32(hw, NGBE_IPSTXKEY(0), 0);
+ wr32(hw, NGBE_IPSTXKEY(1), 0);
+ wr32(hw, NGBE_IPSTXKEY(2), 0);
+ wr32(hw, NGBE_IPSTXKEY(3), 0);
+ wr32(hw, NGBE_IPSTXSALT, 0);
+ wr32w(hw, NGBE_IPSTXIDX, reg_val, NGBE_IPSTXIDX_WRITE, 1000);
+ }
+
+ memset(priv->rx_ip_tbl, 0, sizeof(priv->rx_ip_tbl));
+ memset(priv->rx_sa_tbl, 0, sizeof(priv->rx_sa_tbl));
+ memset(priv->tx_sa_tbl, 0, sizeof(priv->tx_sa_tbl));
+}
+
static int
ngbe_crypto_add_sa(struct ngbe_crypto_session *ic_session)
{
@@ -550,6 +599,63 @@ ngbe_crypto_capabilities_get(void *device __rte_unused)
return ngbe_security_capabilities;
}
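+/* Enable the inline IPsec engine; called from ngbe_dev_rxtx_start() at port start */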
+int
+ngbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
+{
+ struct ngbe_hw *hw = ngbe_dev_hw(dev);
+ uint32_t reg;
+ uint64_t rx_offloads;
+ uint64_t tx_offloads;
+
+ rx_offloads = dev->data->dev_conf.rxmode.offloads;
+ tx_offloads = dev->data->dev_conf.txmode.offloads;
+
+ /* sanity checks */
+ if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+ PMD_DRV_LOG(ERR, "RSC and IPsec not supported");
+ return -1;
+ }
+ if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+ PMD_DRV_LOG(ERR, "HW CRC strip needs to be enabled for IPsec");
+ return -1;
+ }
+
+ /* Set NGBE_SECTXBUFAF to 0x14 as required in the datasheet */
+ wr32(hw, NGBE_SECTXBUFAF, 0x14);
+
+ /* IFG needs to be set to 3 when we are using security. Otherwise a Tx
+ * hang will occur with heavy traffic.
+ */
+ reg = rd32(hw, NGBE_SECTXIFG);
+ reg = (reg & ~NGBE_SECTXIFG_MIN_MASK) | NGBE_SECTXIFG_MIN(0x3);
+ wr32(hw, NGBE_SECTXIFG, reg);
+
+ reg = rd32(hw, NGBE_SECRXCTL);
+ reg |= NGBE_SECRXCTL_CRCSTRIP;
+ wr32(hw, NGBE_SECRXCTL, reg);
+
+ if (rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
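+ /* clear the ODSA bit and read it back to confirm Rx crypto is enabled */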
+ wr32m(hw, NGBE_SECRXCTL, NGBE_SECRXCTL_ODSA, 0);
+ reg = rd32m(hw, NGBE_SECRXCTL, NGBE_SECRXCTL_ODSA);
+ if (reg != 0) {
+ PMD_DRV_LOG(ERR, "Error enabling Rx Crypto");
+ return -1;
+ }
+ }
+ if (tx_offloads & DEV_TX_OFFLOAD_SECURITY) {
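+ /* Tx crypto requires store-and-forward mode; verify the write took effect */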
+ wr32(hw, NGBE_SECTXCTL, NGBE_SECTXCTL_STFWD);
+ reg = rd32(hw, NGBE_SECTXCTL);
+ if (reg != NGBE_SECTXCTL_STFWD) {
+ PMD_DRV_LOG(ERR, "Error enabling Rx Crypto");
+ return -1;
+ }
+ }
+
+ ngbe_crypto_clear_ipsec_tables(dev);
+
+ return 0;
+}
+
static struct rte_security_ops ngbe_security_ops = {
.session_create = ngbe_crypto_create_session,
.session_get_size = ngbe_crypto_session_get_size,
@@ -90,4 +90,6 @@ struct ngbe_ipsec {
struct ngbe_crypto_tx_sa_table tx_sa_tbl[IPSEC_MAX_SA_COUNT];
};
+int ngbe_crypto_enable_ipsec(struct rte_eth_dev *dev);
+
#endif /*NGBE_IPSEC_H_*/
@@ -33,6 +33,9 @@ static const u64 NGBE_TX_OFFLOAD_MASK = (PKT_TX_IP_CKSUM |
PKT_TX_TCP_SEG |
PKT_TX_TUNNEL_MASK |
PKT_TX_OUTER_IP_CKSUM |
+#ifdef RTE_LIB_SECURITY
+ PKT_TX_SEC_OFFLOAD |
+#endif
NGBE_TX_IEEE1588_TMST);
#define NGBE_TX_OFFLOAD_NOTSUP_MASK \
@@ -274,7 +277,8 @@ ngbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
static inline void
ngbe_set_xmit_ctx(struct ngbe_tx_queue *txq,
volatile struct ngbe_tx_ctx_desc *ctx_txd,
- uint64_t ol_flags, union ngbe_tx_offload tx_offload)
+ uint64_t ol_flags, union ngbe_tx_offload tx_offload,
+ __rte_unused uint64_t *mdata)
{
union ngbe_tx_offload tx_offload_mask;
uint32_t type_tucmd_mlhl;
@@ -361,6 +365,19 @@ ngbe_set_xmit_ctx(struct ngbe_tx_queue *txq,
vlan_macip_lens |= NGBE_TXD_VLAN(tx_offload.vlan_tci);
}
+#ifdef RTE_LIB_SECURITY
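+ /* fold the session's crypto metadata into the context descriptor */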
+ if (ol_flags & PKT_TX_SEC_OFFLOAD) {
+ union ngbe_crypto_tx_desc_md *md =
+ (union ngbe_crypto_tx_desc_md *)mdata;
+ tunnel_seed |= NGBE_TXD_IPSEC_SAIDX(md->sa_idx);
+ type_tucmd_mlhl |= md->enc ?
+ (NGBE_TXD_IPSEC_ESP | NGBE_TXD_IPSEC_ESPENC) : 0;
+ type_tucmd_mlhl |= NGBE_TXD_IPSEC_ESPLEN(md->pad_len);
+ tx_offload_mask.sa_idx |= ~0;
+ tx_offload_mask.sec_pad_len |= ~0;
+ }
+#endif
+
txq->ctx_cache[ctx_idx].flags = ol_flags;
txq->ctx_cache[ctx_idx].tx_offload.data[0] =
tx_offload_mask.data[0] & tx_offload.data[0];
@@ -592,6 +609,9 @@ ngbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint32_t ctx = 0;
uint32_t new_ctx;
union ngbe_tx_offload tx_offload;
+#ifdef RTE_LIB_SECURITY
+ uint8_t use_ipsec;
+#endif
tx_offload.data[0] = 0;
tx_offload.data[1] = 0;
@@ -618,6 +638,9 @@ ngbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
* are needed for offload functionality.
*/
ol_flags = tx_pkt->ol_flags;
+#ifdef RTE_LIB_SECURITY
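+ /* offload only if the queue is security-enabled and the mbuf requests it */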
+ use_ipsec = txq->using_ipsec && (ol_flags & PKT_TX_SEC_OFFLOAD);
+#endif
/* If hardware offload required */
tx_ol_req = ol_flags & NGBE_TX_OFFLOAD_MASK;
@@ -633,6 +656,16 @@ ngbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
tx_offload.outer_tun_len = 0;
+#ifdef RTE_LIB_SECURITY
+ if (use_ipsec) {
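+ /* SA metadata was attached to the mbuf via the security dynfield */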
+ union ngbe_crypto_tx_desc_md *ipsec_mdata =
+ (union ngbe_crypto_tx_desc_md *)
+ rte_security_dynfield(tx_pkt);
+ tx_offload.sa_idx = ipsec_mdata->sa_idx;
+ tx_offload.sec_pad_len = ipsec_mdata->pad_len;
+ }
+#endif
+
/* If new context need be built or reuse the exist ctx*/
ctx = what_ctx_update(txq, tx_ol_req, tx_offload);
/* Only allocate context descriptor if required */
@@ -776,7 +809,8 @@ ngbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
}
ngbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
- tx_offload);
+ tx_offload,
+ rte_security_dynfield(tx_pkt));
txe->last_id = tx_last;
tx_id = txe->next_id;
@@ -795,6 +829,10 @@ ngbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
}
olinfo_status |= NGBE_TXD_PAYLEN(pkt_len);
+#ifdef RTE_LIB_SECURITY
+ if (use_ipsec)
+ olinfo_status |= NGBE_TXD_IPSEC;
+#endif
m_seg = tx_pkt;
do {
@@ -978,6 +1016,13 @@ rx_desc_error_to_pkt_flags(uint32_t rx_status)
pkt_flags |= PKT_RX_OUTER_IP_CKSUM_BAD;
}
+#ifdef RTE_LIB_SECURITY
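+ /* SECP: packet went through the inline crypto engine; SECERR: it failed */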
+ if (rx_status & NGBE_RXD_STAT_SECP) {
+ pkt_flags |= PKT_RX_SEC_OFFLOAD;
+ if (rx_status & NGBE_RXD_ERR_SECERR)
+ pkt_flags |= PKT_RX_SEC_OFFLOAD_FAILED;
+ }
+#endif
return pkt_flags;
}
@@ -1800,6 +1845,9 @@ ngbe_dev_tx_done_cleanup(void *tx_queue, uint32_t free_cnt)
{
struct ngbe_tx_queue *txq = (struct ngbe_tx_queue *)tx_queue;
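+ /* the simple cleanup path cannot handle IPsec context descriptors */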
if (txq->offloads == 0 &&
+#ifdef RTE_LIB_SECURITY
+ !(txq->using_ipsec) &&
+#endif
txq->tx_free_thresh >= RTE_PMD_NGBE_TX_MAX_BURST)
return ngbe_tx_done_cleanup_simple(txq, free_cnt);
@@ -1885,6 +1933,9 @@ ngbe_set_tx_function(struct rte_eth_dev *dev, struct ngbe_tx_queue *txq)
{
/* Use a simple Tx queue (no offloads, no multi segs) if possible */
if (txq->offloads == 0 &&
+#ifdef RTE_LIB_SECURITY
+ !(txq->using_ipsec) &&
+#endif
txq->tx_free_thresh >= RTE_PMD_NGBE_TX_MAX_BURST) {
PMD_INIT_LOG(DEBUG, "Using simple tx code path");
dev->tx_pkt_burst = ngbe_xmit_pkts_simple;
@@ -1926,6 +1977,10 @@ ngbe_get_tx_port_offloads(struct rte_eth_dev *dev)
if (hw->is_pf)
tx_offload_capa |= DEV_TX_OFFLOAD_QINQ_INSERT;
+#ifdef RTE_LIB_SECURITY
+ if (dev->security_ctx)
+ tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
+#endif
return tx_offload_capa;
}
@@ -2012,6 +2067,10 @@ ngbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
txq->offloads = offloads;
txq->ops = &def_txq_ops;
txq->tx_deferred_start = tx_conf->tx_deferred_start;
+#ifdef RTE_LIB_SECURITY
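+ /* cache the security offload state so the Tx hot path avoids dev_conf */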
+ txq->using_ipsec = !!(dev->data->dev_conf.txmode.offloads &
+ DEV_TX_OFFLOAD_SECURITY);
+#endif
txq->tdt_reg_addr = NGBE_REG_ADDR(hw, NGBE_TXWP(txq->reg_idx));
txq->tdc_reg_addr = NGBE_REG_ADDR(hw, NGBE_TXCFG(txq->reg_idx));
@@ -2220,6 +2279,11 @@ ngbe_get_rx_port_offloads(struct rte_eth_dev *dev)
offloads |= (DEV_RX_OFFLOAD_QINQ_STRIP |
DEV_RX_OFFLOAD_VLAN_EXTEND);
+#ifdef RTE_LIB_SECURITY
+ if (dev->security_ctx)
+ offloads |= DEV_RX_OFFLOAD_SECURITY;
+#endif
+
return offloads;
}
@@ -2745,6 +2809,7 @@ ngbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
void
ngbe_set_rx_function(struct rte_eth_dev *dev)
{
+#ifdef RTE_LIB_SECURITY
+ uint16_t i;
+#endif
struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
if (dev->data->scattered_rx) {
@@ -2788,6 +2853,15 @@ ngbe_set_rx_function(struct rte_eth_dev *dev)
dev->rx_pkt_burst = ngbe_recv_pkts;
}
+
+#ifdef RTE_LIB_SECURITY
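+ /* propagate the Rx security flag to each queue for the Rx path */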
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct ngbe_rx_queue *rxq = dev->data->rx_queues[i];
+
+ rxq->using_ipsec = !!(dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_SECURITY);
+ }
+#endif
}
/*
@@ -3052,6 +3126,19 @@ ngbe_dev_rxtx_start(struct rte_eth_dev *dev)
if (hw->is_pf && dev->data->dev_conf.lpbk_mode)
ngbe_setup_loopback_link(hw);
+#ifdef RTE_LIB_SECURITY
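+ /* bring up the crypto engine when either direction enables inline security */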
+ if ((dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) ||
+ (dev->data->dev_conf.txmode.offloads & DEV_TX_OFFLOAD_SECURITY)) {
+ ret = ngbe_crypto_enable_ipsec(dev);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR,
+ "ngbe_crypto_enable_ipsec fails with %d.",
+ ret);
+ return ret;
+ }
+ }
+#endif
+
return 0;
}
@@ -261,7 +261,10 @@ struct ngbe_rx_queue {
uint16_t rx_nb_avail; /**< nr of staged pkts ready to ret to app */
uint16_t rx_next_avail; /**< idx of next staged pkt to ret to app */
uint16_t rx_free_trigger; /**< triggers rx buffer allocation */
-
+#ifdef RTE_LIB_SECURITY
+ uint8_t using_ipsec;
+ /**< indicates that IPsec Rx feature is in use */
+#endif
uint16_t rx_free_thresh; /**< max free Rx desc to hold */
uint16_t queue_id; /**< RX queue index */
uint16_t reg_idx; /**< RX queue register index */
@@ -305,6 +308,11 @@ union ngbe_tx_offload {
uint64_t outer_tun_len:8; /**< Outer TUN (Tunnel) Hdr Length. */
uint64_t outer_l2_len:8; /**< Outer L2 (MAC) Hdr Length. */
uint64_t outer_l3_len:16; /**< Outer L3 (IP) Hdr Length. */
+#ifdef RTE_LIB_SECURITY
+ /* inline IPsec related */
+ uint64_t sa_idx:8; /**< TX SA database entry index */
+ uint64_t sec_pad_len:4; /**< padding length */
+#endif
};
};
@@ -355,6 +363,10 @@ struct ngbe_tx_queue {
uint8_t tx_deferred_start; /**< not in global dev start */
const struct ngbe_txq_ops *ops; /**< txq ops */
+#ifdef RTE_LIB_SECURITY
+ uint8_t using_ipsec;
+ /**< indicates that IPsec Tx feature is in use */
+#endif
};
struct ngbe_txq_ops {