@@ -32,15 +32,15 @@
#endif /* IP_FRAG_TBL_STAT */
/* internal functions declarations */
-struct rte_mbuf * ip_frag_process(struct ip_frag_pkt *fp,
- struct rte_ip_frag_death_row *dr, struct rte_mbuf *mb,
- uint16_t ofs, uint16_t len, uint16_t more_frags);
+struct rte_mbuf *ip_frag_process(struct rte_ip_frag_tbl *tbl,
+ struct ip_frag_pkt *fp, struct rte_ip_frag_death_row *dr,
+ struct rte_mbuf *mb, uint16_t ofs, uint16_t len, uint16_t more_frags);
-struct ip_frag_pkt * ip_frag_find(struct rte_ip_frag_tbl *tbl,
+struct ip_frag_pkt *ip_frag_find(struct rte_ip_frag_tbl *tbl,
struct rte_ip_frag_death_row *dr,
const struct ip_frag_key *key, uint64_t tms);
-struct ip_frag_pkt * ip_frag_lookup(struct rte_ip_frag_tbl *tbl,
+struct ip_frag_pkt *ip_frag_lookup(struct rte_ip_frag_tbl *tbl,
const struct ip_frag_key *key, uint64_t tms,
struct ip_frag_pkt **free, struct ip_frag_pkt **stale);
@@ -91,7 +91,8 @@ ip_frag_key_cmp(const struct ip_frag_key * k1, const struct ip_frag_key * k2)
/* put fragment on death row */
static inline void
-ip_frag_free(struct ip_frag_pkt *fp, struct rte_ip_frag_death_row *dr)
+ip_frag_free(struct rte_ip_frag_tbl *tbl, struct ip_frag_pkt *fp,
+ struct rte_ip_frag_death_row *dr)
{
uint32_t i, k;
@@ -100,6 +101,7 @@ ip_frag_free(struct ip_frag_pkt *fp, struct rte_ip_frag_death_row *dr)
if (fp->frags[i].mb != NULL) {
dr->row[k++] = fp->frags[i].mb;
fp->frags[i].mb = NULL;
+ tbl->nb_mbufs--;
}
}
@@ -160,7 +162,7 @@ static inline void
ip_frag_tbl_del(struct rte_ip_frag_tbl *tbl, struct rte_ip_frag_death_row *dr,
struct ip_frag_pkt *fp)
{
- ip_frag_free(fp, dr);
+ ip_frag_free(tbl, fp, dr);
ip_frag_key_invalidate(&fp->key);
TAILQ_REMOVE(&tbl->lru, fp, lru);
tbl->use_entries--;
@@ -29,14 +29,13 @@ static inline void
ip_frag_tbl_reuse(struct rte_ip_frag_tbl *tbl, struct rte_ip_frag_death_row *dr,
struct ip_frag_pkt *fp, uint64_t tms)
{
- ip_frag_free(fp, dr);
+ ip_frag_free(tbl, fp, dr);
ip_frag_reset(fp, tms);
TAILQ_REMOVE(&tbl->lru, fp, lru);
TAILQ_INSERT_TAIL(&tbl->lru, fp, lru);
IP_FRAG_TBL_STAT_UPDATE(&tbl->stat, reuse_num, 1);
}
-
static inline void
ipv4_frag_hash(const struct ip_frag_key *key, uint32_t *v1, uint32_t *v2)
{
@@ -88,8 +87,9 @@ ipv6_frag_hash(const struct ip_frag_key *key, uint32_t *v1, uint32_t *v2)
}
struct rte_mbuf *
-ip_frag_process(struct ip_frag_pkt *fp, struct rte_ip_frag_death_row *dr,
- struct rte_mbuf *mb, uint16_t ofs, uint16_t len, uint16_t more_frags)
+ip_frag_process(struct rte_ip_frag_tbl *tbl, struct ip_frag_pkt *fp,
+ struct rte_ip_frag_death_row *dr, struct rte_mbuf *mb, uint16_t ofs,
+ uint16_t len, uint16_t more_frags)
{
uint32_t idx;
@@ -147,7 +147,7 @@ ip_frag_process(struct ip_frag_pkt *fp, struct rte_ip_frag_death_row *dr,
fp->frags[IP_LAST_FRAG_IDX].len);
/* free all fragments, invalidate the entry. */
- ip_frag_free(fp, dr);
+ ip_frag_free(tbl, fp, dr);
ip_frag_key_invalidate(&fp->key);
IP_FRAG_MBUF2DR(dr, mb);
@@ -157,6 +157,7 @@ ip_frag_process(struct ip_frag_pkt *fp, struct rte_ip_frag_death_row *dr,
fp->frags[idx].ofs = ofs;
fp->frags[idx].len = len;
fp->frags[idx].mb = mb;
+ tbl->nb_mbufs++;
mb = NULL;
@@ -205,8 +206,9 @@ ip_frag_process(struct ip_frag_pkt *fp, struct rte_ip_frag_death_row *dr,
fp->frags[IP_LAST_FRAG_IDX].len);
/* free associated resources. */
- ip_frag_free(fp, dr);
- }
+ ip_frag_free(tbl, fp, dr);
+ } else
+ tbl->nb_mbufs -= fp->last_idx;
/* we are done with that entry, invalidate it. */
ip_frag_key_invalidate(&fp->key);
@@ -96,6 +96,7 @@ struct rte_ip_frag_tbl {
uint32_t bucket_entries; /**< hash associativity. */
uint32_t nb_entries; /**< total size of the table. */
uint32_t nb_buckets; /**< num of associativity lines. */
+ uint32_t nb_mbufs; /**< num of mbufs held in the tbl. */
struct ip_frag_pkt *last; /**< last used entry. */
struct ip_pkt_list lru; /**< LRU list for table entries. */
struct ip_frag_tbl_stat stat; /**< statistics counters. */
@@ -329,8 +330,23 @@ void
rte_ip_frag_table_statistics_dump(FILE * f, const struct rte_ip_frag_tbl *tbl);
/**
- * Delete expired fragments
+ * Number of mbufs held in the fragmentation table.
+ *
+ * @param tbl
+ * Fragmentation table
*
+ * @return
+ * Number of mbufs held in the fragmentation table.
+ */
+static inline uint32_t __rte_experimental
+rte_frag_table_mbuf_count(const struct rte_ip_frag_tbl *tbl)
+{
+ return tbl->nb_mbufs;
+}
+
+/**
+ * Delete expired fragments
+ *
* @param tbl
* Table to delete expired fragments from
* @param dr
@@ -75,6 +75,7 @@ rte_ip_frag_table_create(uint32_t bucket_num, uint32_t bucket_entries,
tbl->nb_buckets = bucket_num;
tbl->bucket_entries = bucket_entries;
tbl->entry_mask = (tbl->nb_entries - 1) & ~(tbl->bucket_entries - 1);
+ tbl->nb_mbufs = 0;
TAILQ_INIT(&(tbl->lru));
return tbl;
@@ -23,4 +23,5 @@ EXPERIMENTAL {
global:
rte_frag_table_del_expired_entries;
+ rte_frag_table_mbuf_count;
};
@@ -146,7 +146,7 @@ rte_ipv4_frag_reassemble_packet(struct rte_ip_frag_tbl *tbl,
/* process the fragmented packet. */
- mb = ip_frag_process(fp, dr, mb, ip_ofs, ip_len, ip_flag);
+ mb = ip_frag_process(tbl, fp, dr, mb, ip_ofs, ip_len, ip_flag);
ip_frag_inuse(tbl, fp);
IP_FRAG_LOG(DEBUG, "%s:%d:\n"
@@ -186,7 +186,7 @@ rte_ipv6_frag_reassemble_packet(struct rte_ip_frag_tbl *tbl,
/* process the fragmented packet. */
- mb = ip_frag_process(fp, dr, mb, ip_ofs, ip_len,
+ mb = ip_frag_process(tbl, fp, dr, mb, ip_ofs, ip_len,
MORE_FRAGS(frag_hdr->frag_data));
ip_frag_inuse(tbl, fp);