@@ -47,6 +47,7 @@ LIBABIVER := 1
# all source are stored in SRCS-y
#
SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_tm.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_default.c
#
@@ -41,6 +41,8 @@
#include <rte_vdev.h>
#include <rte_kvargs.h>
#include <rte_errno.h>
+#include <rte_tm_driver.h>
+#include <rte_sched.h>
#include "rte_eth_softnic.h"
#include "rte_eth_softnic_internals.h"
@@ -58,6 +60,10 @@ static const char *pmd_valid_args[] = {
static struct rte_vdev_driver pmd_drv;
+#ifndef TM
+#define TM 1
+#endif
+
static int
pmd_eth_dev_configure(struct rte_eth_dev *dev)
{
@@ -113,6 +119,13 @@ pmd_eth_dev_start(struct rte_eth_dev *dev)
{
struct pmd_internals *p = dev->data->dev_private;
+#if TM
+ /* Initialize the Traffic Manager for the overlay device */
+ int status = tm_init(p);
+ if (status)
+ return status;
+#endif
+
/* Clone dev->data from underlay to overlay */
memcpy(dev->data->mac_pool_sel,
p->udev->data->mac_pool_sel,
@@ -132,6 +145,10 @@ pmd_eth_dev_stop(struct rte_eth_dev *dev)
/* Call the current function for the underlay device */
rte_eth_dev_stop(p->uport_id);
+#if TM
+ /* Free the Traffic Manager for the overlay device */
+ tm_free(p);
+#endif
}
static void
@@ -249,6 +266,14 @@ pmd_eth_dev_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
rte_eth_dev_mac_addr_remove(p->uport_id, &dev->data->mac_addrs[index]);
}
+static int
+pmd_eth_dev_tm_ops_get(struct rte_eth_dev *dev __rte_unused, void *arg)
+{
+	if (arg == NULL)
+		return -EINVAL;
+	*(const struct rte_tm_ops **)arg = &pmd_tm_ops;
+	return 0;
+}
static uint16_t
pmd_eth_dev_tx_burst(void *txq,
struct rte_mbuf **tx_pkts,
@@ -256,13 +281,30 @@ pmd_eth_dev_tx_burst(void *txq,
{
struct pmd_internals *p = txq;
- return rte_eth_tx_burst(p->uport_id, p->txq_id, tx_pkts, nb_pkts);
+#if TM
+ rte_sched_port_enqueue(p->sched, tx_pkts, nb_pkts);
+ return nb_pkts;
+#else
+ return rte_eth_tx_burst(p->uport_id, p->txq_id, tx_pkts, nb_pkts);
+#endif
}
int
-rte_eth_softnic_run(uint8_t port_id __rte_unused)
+rte_eth_softnic_run(uint8_t port_id)
{
+#if TM
+	struct rte_eth_dev *odev = &rte_eth_devices[port_id];
+	struct pmd_internals *p = odev->data->dev_private;
+	uint32_t n_pkts, n_pkts_deq;
+
+	n_pkts_deq = rte_sched_port_dequeue(p->sched, p->pkts, p->deq_bsz);
+	for (n_pkts = 0; n_pkts < n_pkts_deq;)
+		n_pkts += rte_eth_tx_burst(p->uport_id, p->txq_id,
+			&p->pkts[n_pkts], (uint16_t)(n_pkts_deq - n_pkts));
+#else
+	RTE_SET_USED(port_id);
+#endif
return 0;
}
@@ -287,6 +329,7 @@ pmd_ops_build(struct eth_dev_ops *o, const struct eth_dev_ops *u)
o->mac_addr_set = pmd_eth_dev_mac_addr_set;
o->mac_addr_add = pmd_eth_dev_mac_addr_add;
o->mac_addr_remove = pmd_eth_dev_mac_addr_remove;
+ o->tm_ops_get = pmd_eth_dev_tm_ops_get;
}
int
@@ -38,9 +38,25 @@
#include <rte_mbuf.h>
#include <rte_ethdev.h>
+#include <rte_sched.h>
#include "rte_eth_softnic.h"
+#ifndef TM_MAX_SUBPORTS
+#define TM_MAX_SUBPORTS 8
+#endif
+
+#ifndef TM_MAX_PIPES_PER_SUBPORT
+#define TM_MAX_PIPES_PER_SUBPORT 4096
+#endif
+
+struct tm_params {
+ struct rte_sched_port_params port_params;
+ struct rte_sched_subport_params subport_params[TM_MAX_SUBPORTS];
+ struct rte_sched_pipe_params pipe_profiles[RTE_SCHED_PIPE_PROFILES_PER_PORT];
+ int pipe_to_profile[TM_MAX_SUBPORTS * TM_MAX_PIPES_PER_SUBPORT];
+};
+
struct pmd_internals {
/* Devices */
struct rte_eth_dev *odev;
@@ -54,10 +70,20 @@ struct pmd_internals {
/* Operation */
struct rte_mbuf *pkts[RTE_ETH_SOFTNIC_DEQ_BSZ_MAX];
+ struct tm_params tm_params;
+ struct rte_sched_port *sched;
uint32_t deq_bsz;
uint32_t txq_id;
};
+extern const struct rte_tm_ops pmd_tm_ops;
+
+int
+tm_init(struct pmd_internals *p);
+
+void
+tm_free(struct pmd_internals *p);
+
void
pmd_ops_inherit(struct eth_dev_ops *o, const struct eth_dev_ops *u);
new file mode 100644
@@ -0,0 +1,235 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+
+#include <rte_tm_driver.h>
+#include <rte_sched.h>
+
+#include "rte_eth_softnic_internals.h"
+#include "rte_eth_softnic.h"
+
+int
+tm_init(struct pmd_internals *p)
+{
+	struct tm_params *t = &p->tm_params;
+	uint32_t n_subports, subport_id;
+	int status;
+
+	/* Port */
+	t->port_params.name = p->odev->data->name;
+	t->port_params.socket = p->udev->data->numa_node;
+	t->port_params.rate = p->udev->data->dev_link.link_speed;
+
+	p->sched = rte_sched_port_config(&t->port_params);
+	if (p->sched == NULL)
+		return -1;
+
+	/* Subport */
+	n_subports = t->port_params.n_subports_per_port;
+	for (subport_id = 0; subport_id < n_subports; subport_id++) {
+		uint32_t n_pipes = t->port_params.n_pipes_per_subport;
+		uint32_t pipe_id;
+
+		status = rte_sched_subport_config(p->sched,
+			subport_id,
+			&t->subport_params[subport_id]);
+		if (status)
+			goto error;
+
+		/* Pipe */
+		for (pipe_id = 0; pipe_id < n_pipes; pipe_id++) {
+			int pos = subport_id * TM_MAX_PIPES_PER_SUBPORT + pipe_id;
+			int profile_id = t->pipe_to_profile[pos];
+
+			if (profile_id < 0)
+				continue;
+
+			status = rte_sched_pipe_config(p->sched,
+				subport_id,
+				pipe_id,
+				profile_id);
+			if (status)
+				goto error;
+		}
+	}
+
+	return 0;
+
+error:
+	/* Reset p->sched so a later tm_free() does not double free */
+	rte_sched_port_free(p->sched);
+	p->sched = NULL;
+	return -1;
+}
+
+void
+tm_free(struct pmd_internals *p)
+{
+	rte_sched_port_free(p->sched);
+	p->sched = NULL;
+}
+
+/* Traffic manager node type get */
+static int
+pmd_tm_node_type_get(struct rte_eth_dev *dev __rte_unused,
+ uint32_t node_id __rte_unused,
+ int *is_leaf __rte_unused,
+ struct rte_tm_error *error __rte_unused)
+{
+ return 0;
+}
+
+/* Traffic manager capabilities get */
+static int
+pmd_tm_capabilities_get(struct rte_eth_dev *dev __rte_unused,
+ struct rte_tm_capabilities *cap __rte_unused,
+ struct rte_tm_error *error __rte_unused)
+{
+ return 0;
+}
+
+/* Traffic manager level capabilities get */
+static int
+pmd_tm_level_capabilities_get(struct rte_eth_dev *dev __rte_unused,
+ uint32_t level_id __rte_unused,
+ struct rte_tm_level_capabilities *cap __rte_unused,
+ struct rte_tm_error *error __rte_unused)
+{
+ return 0;
+}
+
+/* Traffic manager node capabilities get */
+static int
+pmd_tm_node_capabilities_get(struct rte_eth_dev *dev __rte_unused,
+ uint32_t node_id __rte_unused,
+ struct rte_tm_node_capabilities *cap __rte_unused,
+ struct rte_tm_error *error __rte_unused)
+{
+ return 0;
+}
+
+/* Traffic manager shaper profile add */
+static int
+pmd_tm_shaper_profile_add(struct rte_eth_dev *dev __rte_unused,
+ uint32_t shaper_profile_id __rte_unused,
+ struct rte_tm_shaper_params *profile __rte_unused,
+ struct rte_tm_error *error __rte_unused)
+{
+ return 0;
+}
+
+/* Traffic manager shaper profile delete */
+static int
+pmd_tm_shaper_profile_delete(struct rte_eth_dev *dev __rte_unused,
+ uint32_t shaper_profile_id __rte_unused,
+ struct rte_tm_error *error __rte_unused)
+{
+ return 0;
+}
+
+/* Traffic manager node add */
+static int
+pmd_tm_node_add(struct rte_eth_dev *dev __rte_unused,
+ uint32_t node_id __rte_unused,
+ uint32_t parent_node_id __rte_unused,
+ uint32_t priority __rte_unused,
+ uint32_t weight __rte_unused,
+ struct rte_tm_node_params *params __rte_unused,
+ struct rte_tm_error *error __rte_unused)
+{
+ return 0;
+}
+
+/* Traffic manager node delete */
+static int
+pmd_tm_node_delete(struct rte_eth_dev *dev __rte_unused,
+ uint32_t node_id __rte_unused,
+ struct rte_tm_error *error __rte_unused)
+{
+ return 0;
+}
+
+/* Traffic manager hierarchy commit */
+static int
+pmd_tm_hierarchy_commit(struct rte_eth_dev *dev __rte_unused,
+ int clear_on_fail __rte_unused,
+ struct rte_tm_error *error __rte_unused)
+{
+ return 0;
+}
+
+/* Traffic manager read stats counters for specific node */
+static int
+pmd_tm_node_stats_read(struct rte_eth_dev *dev __rte_unused,
+ uint32_t node_id __rte_unused,
+ struct rte_tm_node_stats *stats __rte_unused,
+ uint64_t *stats_mask __rte_unused,
+ int clear __rte_unused,
+ struct rte_tm_error *error __rte_unused)
+{
+ return 0;
+}
+
+const struct rte_tm_ops pmd_tm_ops = {
+ .node_type_get = pmd_tm_node_type_get,
+ .capabilities_get = pmd_tm_capabilities_get,
+ .level_capabilities_get = pmd_tm_level_capabilities_get,
+ .node_capabilities_get = pmd_tm_node_capabilities_get,
+
+ .wred_profile_add = NULL,
+ .wred_profile_delete = NULL,
+ .shared_wred_context_add_update = NULL,
+ .shared_wred_context_delete = NULL,
+
+ .shaper_profile_add = pmd_tm_shaper_profile_add,
+ .shaper_profile_delete = pmd_tm_shaper_profile_delete,
+ .shared_shaper_add_update = NULL,
+ .shared_shaper_delete = NULL,
+
+ .node_add = pmd_tm_node_add,
+ .node_delete = pmd_tm_node_delete,
+ .node_suspend = NULL,
+ .node_resume = NULL,
+ .hierarchy_commit = pmd_tm_hierarchy_commit,
+
+ .node_parent_update = NULL,
+ .node_shaper_update = NULL,
+ .node_shared_shaper_update = NULL,
+ .node_stats_update = NULL,
+ .node_wfq_weight_mode_update = NULL,
+ .node_cman_update = NULL,
+ .node_wred_context_update = NULL,
+ .node_shared_wred_context_update = NULL,
+
+ .node_stats_read = pmd_tm_node_stats_read,
+};