diff mbox

[dpdk-dev,RFC,2/2] kdp: add virtual PMD for kernel slow data path communication

Message ID 1453478442-23000-3-git-send-email-ferruh.yigit@intel.com (mailing list archive)
State Superseded, archived
Headers show

Commit Message

Ferruh Yigit Jan. 22, 2016, 4 p.m. UTC
This patch provides slow data path communication to the Linux kernel.
Patch is based on librte_kni, and heavily re-uses it.

The main difference is librte_kni library converted into a PMD, to
provide ease of use for applications.

Now any application can use slow path communication without any update
in application, because of existing eal support for virtual PMD.

PMD's rx_pkt_burst() gets packets from the FIFO, and tx_pkt_burst() puts
packets into the FIFO.
The corresponding Linux virtual network device driver code
also gets/puts packets from the FIFO, as if they were coming from hardware.

Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
---
 config/common_linuxapp                  |   1 +
 drivers/net/Makefile                    |   3 +-
 drivers/net/kdp/Makefile                |  60 +++++
 drivers/net/kdp/rte_eth_kdp.c           | 405 ++++++++++++++++++++++++++++++++
 drivers/net/kdp/rte_kdp.c               | 365 ++++++++++++++++++++++++++++
 drivers/net/kdp/rte_kdp.h               | 113 +++++++++
 drivers/net/kdp/rte_kdp_fifo.h          |  91 +++++++
 drivers/net/kdp/rte_pmd_kdp_version.map |   4 +
 lib/librte_eal/common/include/rte_log.h |   3 +-
 mk/rte.app.mk                           |   3 +-
 10 files changed, 1045 insertions(+), 3 deletions(-)
 create mode 100644 drivers/net/kdp/Makefile
 create mode 100644 drivers/net/kdp/rte_eth_kdp.c
 create mode 100644 drivers/net/kdp/rte_kdp.c
 create mode 100644 drivers/net/kdp/rte_kdp.h
 create mode 100644 drivers/net/kdp/rte_kdp_fifo.h
 create mode 100644 drivers/net/kdp/rte_pmd_kdp_version.map
diff mbox

Patch

diff --git a/config/common_linuxapp b/config/common_linuxapp
index 73c91d8..b9dec0c 100644
--- a/config/common_linuxapp
+++ b/config/common_linuxapp
@@ -322,6 +322,7 @@  CONFIG_RTE_LIBRTE_PMD_NULL=y
 #
 # Compile KDP PMD
 #
+CONFIG_RTE_LIBRTE_PMD_KDP=y
 CONFIG_RTE_KDP_KMOD=y
 CONFIG_RTE_KDP_PREEMPT_DEFAULT=y
 
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 6e4497e..0be06f5 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -1,6 +1,6 @@ 
 #   BSD LICENSE
 #
-#   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+#   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 #   All rights reserved.
 #
 #   Redistribution and use in source and binary forms, with or without
@@ -51,6 +51,7 @@  DIRS-$(CONFIG_RTE_LIBRTE_PMD_SZEDATA2) += szedata2
 DIRS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtio
 DIRS-$(CONFIG_RTE_LIBRTE_VMXNET3_PMD) += vmxnet3
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_XENVIRT) += xenvirt
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_KDP) += kdp
 
 include $(RTE_SDK)/mk/rte.sharelib.mk
 include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/drivers/net/kdp/Makefile b/drivers/net/kdp/Makefile
new file mode 100644
index 0000000..ac44c0f
--- /dev/null
+++ b/drivers/net/kdp/Makefile
@@ -0,0 +1,60 @@ 
+#   BSD LICENSE
+#
+#   Copyright(c) 2016 Intel Corporation. All rights reserved.
+#
+#   Redistribution and use in source and binary forms, with or without
+#   modification, are permitted provided that the following conditions
+#   are met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in
+#       the documentation and/or other materials provided with the
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_kdp.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+EXPORT_MAP := rte_pmd_kdp_version.map
+
+LIBABIVER := 1
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_KDP) += rte_eth_kdp.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_KDP) += rte_kdp.c
+
+#
+# Export include files
+#
+SYMLINK-y-include +=
+
+# this lib depends upon:
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_KDP) += lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_KDP) += lib/librte_ether
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/kdp/rte_eth_kdp.c b/drivers/net/kdp/rte_eth_kdp.c
new file mode 100644
index 0000000..463bac8
--- /dev/null
+++ b/drivers/net/kdp/rte_eth_kdp.c
@@ -0,0 +1,405 @@ 
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_mbuf.h>
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_dev.h>
+#include <rte_kvargs.h>
+#include <rte_spinlock.h>
+#include "rte_kdp.h"
+
+#define MAX_PACKET_SZ 2048
+
+/* Per-queue private data: statistics counters plus the mempool supplied
+ * at RX queue setup. */
+struct kdp_queue {
+	struct pmd_internals *internals; /* back-pointer to the owning port */
+	struct rte_mempool *mb_pool; /* pool used to refill the kernel side */
+
+	/* counters aggregated by eth_stats_get() */
+	uint64_t rx_pkts;
+	uint64_t rx_bytes;
+	uint64_t rx_err_pkts;
+	uint64_t tx_pkts;
+	uint64_t tx_bytes;
+	uint64_t tx_err_pkts;
+};
+
+/* Per-port private data. */
+struct pmd_internals {
+	struct rte_kdp *kdp; /* kernel data path context, set at devinit */
+
+	struct kdp_queue rx_queues[RTE_MAX_QUEUES_PER_PORT];
+	struct kdp_queue tx_queues[RTE_MAX_QUEUES_PER_PORT];
+};
+
+static struct ether_addr eth_addr = { .addr_bytes = {0} };
+static const char *drivername = "KDP PMD";
+static struct rte_eth_link pmd_link = {
+		.link_speed = 10000,
+		.link_duplex = ETH_LINK_FULL_DUPLEX,
+		.link_status = 0
+};
+
+/*
+ * Receive burst: pull packets the kernel side queued into the FIFO.
+ * Returns the number of mbufs written into bufs[].
+ */
+static uint16_t
+eth_kdp_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
+{
+	struct kdp_queue *kdp_q = q;
+	struct pmd_internals *internals = kdp_q->internals;
+	uint16_t nb_pkts;
+	uint16_t i;
+
+	nb_pkts = rte_kdp_rx_burst(internals->kdp, bufs, nb_bufs);
+
+	kdp_q->rx_pkts += nb_pkts;
+	/* Fix: rx_bytes is reported by eth_stats_get() but was never
+	 * incremented; account the received bytes here. */
+	for (i = 0; i < nb_pkts; i++)
+		kdp_q->rx_bytes += rte_pktmbuf_pkt_len(bufs[i]);
+	kdp_q->rx_err_pkts += nb_bufs - nb_pkts;
+
+	return nb_pkts;
+}
+
+/*
+ * Transmit burst: hand packets to the kernel side through the FIFO.
+ * Returns the number of mbufs actually accepted.
+ */
+static uint16_t
+eth_kdp_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
+{
+	struct kdp_queue *kdp_q = q;
+	struct pmd_internals *internals = kdp_q->internals;
+	uint16_t nb_pkts;
+	uint16_t i;
+
+	nb_pkts = rte_kdp_tx_burst(internals->kdp, bufs, nb_bufs);
+
+	kdp_q->tx_pkts += nb_pkts;
+	/* Fix: tx_bytes is reported by eth_stats_get() but was never
+	 * incremented; accepted mbufs are freed only later via free_q in
+	 * this thread, so reading their length here is safe. */
+	for (i = 0; i < nb_pkts; i++)
+		kdp_q->tx_bytes += rte_pktmbuf_pkt_len(bufs[i]);
+	kdp_q->tx_err_pkts += nb_bufs - nb_pkts;
+
+	return nb_pkts;
+}
+
+/*
+ * Build the KDP configuration for this port and create the kernel-side
+ * device. Returns 0 on success, non-zero on failure.
+ */
+static int
+kdp_start(struct rte_eth_dev *dev)
+{
+	struct pmd_internals *internals = dev->data->dev_private;
+	struct rte_kdp_conf conf;
+	uint16_t port_id = dev->data->port_id;
+	int ret;
+
+	/* Fix: zero the whole config -- core_id (and any future field) is
+	 * read by rte_kdp_start() and was previously left uninitialized. */
+	memset(&conf, 0, sizeof(conf));
+	snprintf(conf.name, RTE_KDP_NAMESIZE, "kdp%u", port_id);
+	conf.force_bind = 0;
+	conf.group_id = port_id;
+	conf.mbuf_size = MAX_PACKET_SZ;
+
+	ret = rte_kdp_start(internals->kdp, internals->rx_queues[0].mb_pool,
+			&conf);
+	if (ret)
+		RTE_LOG(INFO, PMD, "Fail to create kdp for port: %d\n",
+				port_id);
+	return ret;
+}
+
+/* Bring up the KDP backend; mark the link up only on success. */
+static int
+eth_dev_start(struct rte_eth_dev *dev)
+{
+	if (kdp_start(dev) != 0)
+		return -1;
+
+	dev->data->dev_link.link_status = 1;
+	return 0;
+}
+
+static void
+eth_dev_stop(struct rte_eth_dev *dev)
+{
+	struct pmd_internals *internals = dev->data->dev_private;
+
+	/* Tears down the kernel-side device and drains the FIFOs.
+	 * NOTE(review): rte_kdp_release() frees kdp->slot and zeroes *kdp,
+	 * while internals->kdp keeps pointing at it -- confirm a later
+	 * dev_start on the same port is safe. */
+	rte_kdp_release(internals->kdp);
+	dev->data->dev_link.link_status = 0;
+}
+
+static void
+eth_dev_close(struct rte_eth_dev *dev __rte_unused)
+{
+	/* Closes the process-wide /dev/kdp fd (shared static kdp_fd), so
+	 * this affects every KDP port in the process, not just this one. */
+	rte_kdp_close();
+}
+
+static int
+eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
+{
+	/* Nothing to configure for this virtual device. */
+	return 0;
+}
+
+/* Report device capabilities; queue limits mirror what was configured. */
+static void
+eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+{
+	dev_info->driver_name = dev->data->drv_name;
+	dev_info->max_rx_queues = dev->data->nb_rx_queues;
+	dev_info->max_tx_queues = dev->data->nb_tx_queues;
+	dev_info->max_mac_addrs = 1;
+	dev_info->max_rx_pktlen = (uint32_t)-1;
+	dev_info->min_rx_bufsize = 0;
+	dev_info->pci_dev = NULL;
+}
+
+/*
+ * Register an RX queue: the queue object is embedded in pmd_internals;
+ * only the mempool pointer needs recording.
+ */
+static int
+eth_rx_queue_setup(struct rte_eth_dev *dev,
+		uint16_t rx_queue_id,
+		uint16_t nb_rx_desc __rte_unused,
+		unsigned int socket_id __rte_unused,
+		const struct rte_eth_rxconf *rx_conf __rte_unused,
+		struct rte_mempool *mb_pool)
+{
+	struct pmd_internals *internals = dev->data->dev_private;
+	struct kdp_queue *q;
+
+	/* Fix: rx_queue_id is used below, so the __rte_unused tag on it
+	 * was wrong and is removed. */
+	q = &internals->rx_queues[rx_queue_id];
+	q->internals = internals;
+	q->mb_pool = mb_pool;
+
+	dev->data->rx_queues[rx_queue_id] = q;
+
+	return 0;
+}
+
+/* Register a TX queue; queues are statically embedded in pmd_internals,
+ * so setup only links the slot back to the port and publishes it. */
+static int
+eth_tx_queue_setup(struct rte_eth_dev *dev,
+		uint16_t tx_queue_id,
+		uint16_t nb_tx_desc __rte_unused,
+		unsigned int socket_id __rte_unused,
+		const struct rte_eth_txconf *tx_conf __rte_unused)
+{
+	struct pmd_internals *priv = dev->data->dev_private;
+	struct kdp_queue *txq = &priv->tx_queues[tx_queue_id];
+
+	txq->internals = priv;
+	dev->data->tx_queues[tx_queue_id] = txq;
+
+	return 0;
+}
+
+static void
+eth_queue_release(void *q __rte_unused)
+{
+	/* Queues live inside pmd_internals; there is nothing to free. */
+}
+
+static int
+eth_link_update(struct rte_eth_dev *dev __rte_unused,
+		int wait_to_complete __rte_unused)
+{
+	/* Link state is driven only by dev_start/dev_stop. */
+	return 0;
+}
+
+/*
+ * Aggregate per-queue counters into the device-level statistics.
+ * Fix: rx_err_pkts was accumulated by the RX path but never reported;
+ * it now feeds stats->ierrors.
+ */
+static void
+eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+	unsigned i, num_stats;
+	unsigned long rx_packets_total = 0, rx_bytes_total = 0;
+	unsigned long rx_packets_err_total = 0;
+	unsigned long tx_packets_total = 0, tx_bytes_total = 0;
+	unsigned long tx_packets_err_total = 0;
+	struct rte_eth_dev_data *data = dev->data;
+	struct kdp_queue *q;
+
+	/* Per-queue counters are capped at RTE_ETHDEV_QUEUE_STAT_CNTRS. */
+	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
+			data->nb_rx_queues);
+	for (i = 0; i < num_stats; i++) {
+		q = data->rx_queues[i];
+		stats->q_ipackets[i] = q->rx_pkts;
+		stats->q_ibytes[i] = q->rx_bytes;
+		rx_packets_total += stats->q_ipackets[i];
+		rx_bytes_total += stats->q_ibytes[i];
+		rx_packets_err_total += q->rx_err_pkts;
+	}
+
+	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
+			data->nb_tx_queues);
+	for (i = 0; i < num_stats; i++) {
+		q = data->tx_queues[i];
+		stats->q_opackets[i] = q->tx_pkts;
+		stats->q_obytes[i] = q->tx_bytes;
+		stats->q_errors[i] = q->tx_err_pkts;
+		tx_packets_total += stats->q_opackets[i];
+		tx_bytes_total += stats->q_obytes[i];
+		tx_packets_err_total += stats->q_errors[i];
+	}
+
+	stats->ipackets = rx_packets_total;
+	stats->ibytes = rx_bytes_total;
+	stats->ierrors = rx_packets_err_total;
+	stats->opackets = tx_packets_total;
+	stats->obytes = tx_bytes_total;
+	stats->oerrors = tx_packets_err_total;
+}
+
+/* Clear every per-queue statistics counter. */
+static void
+eth_stats_reset(struct rte_eth_dev *dev)
+{
+	unsigned i;
+	struct rte_eth_dev_data *data = dev->data;
+	struct kdp_queue *q;
+
+	for (i = 0; i < data->nb_rx_queues; i++) {
+		q = data->rx_queues[i];
+		q->rx_pkts = 0;
+		q->rx_bytes = 0;
+		/* Fix: the RX error counter was never reset. */
+		q->rx_err_pkts = 0;
+	}
+	for (i = 0; i < data->nb_tx_queues; i++) {
+		/* Fix: this loop wrongly indexed rx_queues, leaving TX
+		 * counters untouched. */
+		q = data->tx_queues[i];
+		q->tx_pkts = 0;
+		q->tx_bytes = 0;
+		q->tx_err_pkts = 0;
+	}
+}
+
+/* ethdev operation table wired into each KDP virtual device. */
+static const struct eth_dev_ops ops = {
+	.dev_start = eth_dev_start,
+	.dev_stop = eth_dev_stop,
+	.dev_close = eth_dev_close,
+	.dev_configure = eth_dev_configure,
+	.dev_infos_get = eth_dev_info,
+	.rx_queue_setup = eth_rx_queue_setup,
+	.tx_queue_setup = eth_tx_queue_setup,
+	.rx_queue_release = eth_queue_release,
+	.tx_queue_release = eth_queue_release,
+	.link_update = eth_link_update,
+	.stats_get = eth_stats_get,
+	.stats_reset = eth_stats_reset,
+};
+
+/*
+ * Allocate an ethdev entry plus private data for one KDP virtual device.
+ * Returns the new ethdev, or NULL on allocation failure (internals are
+ * freed on the error path; a reserved ethdev slot is not reclaimed --
+ * NOTE(review): confirm whether rte_eth_dev_allocate failure is the only
+ * path reaching 'error' after the ethdev is reserved).
+ */
+static struct rte_eth_dev *
+eth_dev_kdp_create(const char *name, unsigned numa_node)
+{
+	uint16_t nb_rx_queues = 1;
+	uint16_t nb_tx_queues = 1;
+	struct rte_eth_dev_data *data = NULL;
+	struct pmd_internals *internals = NULL;
+	struct rte_eth_dev *eth_dev = NULL;
+
+	if (name == NULL)
+		return NULL;
+
+	RTE_LOG(INFO, PMD, "Creating kdp ethdev on numa socket %u\n",
+			numa_node);
+
+	/*
+	 * now do all data allocation - for eth_dev structure
+	 * and internal (private) data
+	 */
+	internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
+	if (internals == NULL)
+		goto error;
+
+	/* reserve an ethdev entry */
+	eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_VIRTUAL);
+	if (eth_dev == NULL)
+		goto error;
+
+	data = eth_dev->data;
+	data->dev_private = internals;
+	data->nb_rx_queues = nb_rx_queues;
+	data->nb_tx_queues = nb_tx_queues;
+	data->dev_link = pmd_link;
+	/* NOTE(review): every KDP port shares the single static eth_addr
+	 * (all zeros); confirm this is intended and that port release never
+	 * frees it. */
+	data->mac_addrs = &eth_addr;
+
+	eth_dev->dev_ops = &ops;
+	eth_dev->driver = NULL;
+
+	data->dev_flags = RTE_ETH_DEV_DETACHABLE;
+	data->kdrv = RTE_KDRV_NONE;
+	data->drv_name = drivername;
+	data->numa_node = numa_node;
+
+	eth_dev->rx_pkt_burst = eth_kdp_rx;
+	eth_dev->tx_pkt_burst = eth_kdp_tx;
+
+	return eth_dev;
+
+error:
+	rte_free(internals);
+
+	return NULL;
+}
+
+/*
+ * vdev init hook: create the ethdev and attach a KDP context to it.
+ * Returns 0 on success; also returns 0 when the KDP backend is missing
+ * (e.g. /dev/kdp not present) so rte_eal_init() does not panic.
+ */
+static int
+rte_pmd_kdp_devinit(const char *name, const char *params __rte_unused)
+{
+	struct rte_eth_dev *eth_dev = NULL;
+	struct pmd_internals *internals;
+	struct rte_kdp *kdp;
+
+	RTE_LOG(INFO, PMD, "Initializing eth_kdp for %s\n", name);
+
+	eth_dev = eth_dev_kdp_create(name, rte_socket_id());
+	if (eth_dev == NULL)
+		return -1;
+
+	internals = eth_dev->data->dev_private;
+
+	kdp = rte_kdp_init(eth_dev->data->port_id);
+	if (kdp == NULL) {
+		/* Fix: free private data before releasing the port entry;
+		 * eth_dev->data must not be dereferenced after release. */
+		rte_free(internals);
+		rte_eth_dev_release_port(eth_dev);
+
+		/* Not return error to prevent panic in rte_eal_init() */
+		return 0;
+	}
+
+	internals->kdp = kdp;
+	return 0;
+}
+
+/*
+ * vdev uninit hook: tear down the ethdev created by devinit.
+ * Returns 0 on success, -EINVAL for a NULL name, -1 if no matching
+ * ethdev exists.
+ */
+static int
+rte_pmd_kdp_devuninit(const char *name)
+{
+	struct rte_eth_dev *eth_dev = NULL;
+
+	if (name == NULL)
+		return -EINVAL;
+
+	RTE_LOG(INFO, PMD, "Un-Initializing eth_kdp for %s\n", name);
+
+	/* find the ethdev entry */
+	eth_dev = rte_eth_dev_allocated(name);
+	if (eth_dev == NULL)
+		return -1;
+
+	/* Fix: free private data before releasing the port entry;
+	 * eth_dev->data was previously read after the release. */
+	rte_free(eth_dev->data->dev_private);
+	rte_eth_dev_release_port(eth_dev);
+
+	return 0;
+}
+
+/* Virtual-device driver descriptor registered with the EAL. */
+static struct rte_driver pmd_kdp_drv = {
+	.name = "eth_kdp",
+	.type = PMD_VDEV,
+	.init = rte_pmd_kdp_devinit,
+	.uninit = rte_pmd_kdp_devuninit,
+};
+
+PMD_REGISTER_DRIVER(pmd_kdp_drv);
diff --git a/drivers/net/kdp/rte_kdp.c b/drivers/net/kdp/rte_kdp.c
new file mode 100644
index 0000000..3d0a184
--- /dev/null
+++ b/drivers/net/kdp/rte_kdp.c
@@ -0,0 +1,365 @@ 
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef RTE_EXEC_ENV_LINUXAPP
+#error "KDP is not supported"
+#endif
+
+#include <fcntl.h>
+#include <unistd.h>
+
+#include <sys/ioctl.h>
+
+#include <rte_spinlock.h>
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+#include <rte_memzone.h>
+
+#include "rte_kdp.h"
+#include "rte_kdp_fifo.h"
+
+#define MAX_MBUF_BURST_NUM     32
+
+/* Maximum number of ring entries */
+#define KDP_FIFO_COUNT_MAX     1024
+#define KDP_FIFO_SIZE          (KDP_FIFO_COUNT_MAX * sizeof(void *) + \
+					sizeof(struct rte_kdp_fifo))
+
+#define KDP_MEM_CHECK(cond) do { if (cond) goto kdp_fail; } while (0)
+
+static volatile int kdp_fd = -1;
+
+/* Look up an existing memzone by name (e.g. after a restart) and fall
+ * back to reserving a fresh one. */
+static const struct rte_memzone *
+kdp_memzone_reserve(const char *name, size_t len, int socket_id,
+		unsigned flags)
+{
+	const struct rte_memzone *mz;
+
+	mz = rte_memzone_lookup(name);
+	if (mz != NULL)
+		return mz;
+
+	return rte_memzone_reserve(name, len, socket_id, flags);
+}
+
+/*
+ * Reserve (or look up) the four shared-memory rings for one KDP slot.
+ * Returns 0 on success, -1 if any memzone reservation fails.
+ * Fix: slot->id is uint32_t, so the format specifier is %u, not %d.
+ */
+static int
+slot_init(struct rte_kdp_memzone_slot *slot)
+{
+#define OBJNAMSIZ 32
+	char obj_name[OBJNAMSIZ];
+	const struct rte_memzone *mz;
+
+	/* TX RING */
+	snprintf(obj_name, OBJNAMSIZ, "kdp_tx_%u", slot->id);
+	mz = kdp_memzone_reserve(obj_name, KDP_FIFO_SIZE, SOCKET_ID_ANY, 0);
+	KDP_MEM_CHECK(mz == NULL);
+	slot->m_tx_q = mz;
+
+	/* RX RING */
+	snprintf(obj_name, OBJNAMSIZ, "kdp_rx_%u", slot->id);
+	mz = kdp_memzone_reserve(obj_name, KDP_FIFO_SIZE, SOCKET_ID_ANY, 0);
+	KDP_MEM_CHECK(mz == NULL);
+	slot->m_rx_q = mz;
+
+	/* ALLOC RING */
+	snprintf(obj_name, OBJNAMSIZ, "kdp_alloc_%u", slot->id);
+	mz = kdp_memzone_reserve(obj_name, KDP_FIFO_SIZE, SOCKET_ID_ANY, 0);
+	KDP_MEM_CHECK(mz == NULL);
+	slot->m_alloc_q = mz;
+
+	/* FREE RING */
+	snprintf(obj_name, OBJNAMSIZ, "kdp_free_%u", slot->id);
+	mz = kdp_memzone_reserve(obj_name, KDP_FIFO_SIZE, SOCKET_ID_ANY, 0);
+	KDP_MEM_CHECK(mz == NULL);
+	slot->m_free_q = mz;
+
+	return 0;
+
+kdp_fail:
+	return -1;
+}
+
+/* Map each shared memzone into the context and reset its FIFO state. */
+static void
+ring_init(struct rte_kdp *kdp)
+{
+	const struct rte_kdp_memzone_slot *slot = kdp->slot;
+
+	kdp->tx_q = slot->m_tx_q->addr;
+	kdp_fifo_init(kdp->tx_q, KDP_FIFO_COUNT_MAX);
+
+	kdp->rx_q = slot->m_rx_q->addr;
+	kdp_fifo_init(kdp->rx_q, KDP_FIFO_COUNT_MAX);
+
+	kdp->alloc_q = slot->m_alloc_q->addr;
+	kdp_fifo_init(kdp->alloc_q, KDP_FIFO_COUNT_MAX);
+
+	kdp->free_q = slot->m_free_q->addr;
+	kdp_fifo_init(kdp->free_q, KDP_FIFO_COUNT_MAX);
+}
+
+/*
+ * Allocate the KDP context and its shared memzones for one port.
+ * Shall be called before any allocation happens.
+ * Returns the new context, or NULL on failure.
+ */
+struct rte_kdp *
+rte_kdp_init(uint16_t port_id)
+{
+	struct rte_kdp_memzone_slot *slot = NULL;
+	struct rte_kdp *kdp = NULL;
+	int ret;
+
+	/* Open the control device once; the fd is shared by all ports. */
+	if (kdp_fd < 0) {
+		kdp_fd = open("/dev/kdp", O_RDWR);
+		if (kdp_fd < 0) {
+			RTE_LOG(ERR, KDP, "Can not open /dev/kdp\n");
+			return NULL;
+		}
+	}
+
+	/* Fix: use zeroed allocations so fields such as in_use (read by
+	 * rte_kdp_release()) start at 0 instead of indeterminate values. */
+	slot = rte_zmalloc(NULL, sizeof(struct rte_kdp_memzone_slot), 0);
+	KDP_MEM_CHECK(slot == NULL);
+	slot->id = port_id;
+
+	kdp = rte_zmalloc(NULL, sizeof(struct rte_kdp), 0);
+	KDP_MEM_CHECK(kdp == NULL);
+	kdp->slot = slot;
+
+	ret = slot_init(slot);
+	KDP_MEM_CHECK(ret < 0);
+
+	ring_init(kdp);
+
+	return kdp;
+
+kdp_fail:
+	rte_free(slot);
+	rte_free(kdp);
+	RTE_LOG(ERR, KDP, "Unable to allocate memory\n");
+	return NULL;
+}
+
+/*
+ * Allocate a burst of mbufs from the configured pool and post them to
+ * alloc_q so the kernel side has buffers to receive into.
+ * The RTE_BUILD_BUG_ON checks pin rte_kdp_mbuf field offsets to those
+ * of rte_mbuf, because both sides access the same memory layout.
+ */
+static void
+kdp_allocate_mbufs(struct rte_kdp *kdp)
+{
+	int i, ret;
+	struct rte_mbuf *pkts[MAX_MBUF_BURST_NUM];
+
+	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pool) !=
+			 offsetof(struct rte_kdp_mbuf, pool));
+	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_addr) !=
+			 offsetof(struct rte_kdp_mbuf, buf_addr));
+	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, next) !=
+			 offsetof(struct rte_kdp_mbuf, next));
+	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) !=
+			 offsetof(struct rte_kdp_mbuf, data_off));
+	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
+			 offsetof(struct rte_kdp_mbuf, data_len));
+	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
+			 offsetof(struct rte_kdp_mbuf, pkt_len));
+	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
+			 offsetof(struct rte_kdp_mbuf, ol_flags));
+
+	/* Check if pktmbuf pool has been configured */
+	if (kdp->pktmbuf_pool == NULL) {
+		RTE_LOG(ERR, KDP, "No valid mempool for allocating mbufs\n");
+		return;
+	}
+
+	for (i = 0; i < MAX_MBUF_BURST_NUM; i++) {
+		pkts[i] = rte_pktmbuf_alloc(kdp->pktmbuf_pool);
+		if (unlikely(pkts[i] == NULL)) {
+			/* Out of memory */
+			RTE_LOG(ERR, KDP, "Out of memory\n");
+			break;
+		}
+	}
+
+	/* No pkt mbuf allocated */
+	if (i <= 0)
+		return;
+
+	ret = kdp_fifo_put(kdp->alloc_q, (void **)pkts, i);
+
+	/* Check if any mbufs not put into alloc_q, and then free them */
+	if (ret >= 0 && ret < i && ret < MAX_MBUF_BURST_NUM) {
+		int j;
+
+		for (j = ret; j < i; j++)
+			rte_pktmbuf_free(pkts[j]);
+	}
+}
+
+/*
+ * Create the kernel-side network device and hand it the physical
+ * addresses of the shared rings and the mbuf mempool.
+ * Returns 0 on success, -1 on bad arguments, missing mempool memzone,
+ * or ioctl failure.
+ */
+int
+rte_kdp_start(struct rte_kdp *kdp, struct rte_mempool *pktmbuf_pool,
+	      const struct rte_kdp_conf *conf)
+{
+	struct rte_kdp_memzone_slot *slot;
+	struct rte_kdp_device_info dev_info;
+	char mz_name[RTE_MEMZONE_NAMESIZE];
+	const struct rte_memzone *mz;
+	int ret;
+
+	/* Fix: validate arguments before any dereference -- kdp->slot was
+	 * previously read before this NULL check. */
+	if (!kdp || !pktmbuf_pool || !conf || !conf->name[0])
+		return -1;
+
+	slot = kdp->slot;
+
+	snprintf(kdp->name, RTE_KDP_NAMESIZE, "%s", conf->name);
+	kdp->pktmbuf_pool = pktmbuf_pool;
+	kdp->group_id = conf->group_id;
+
+	memset(&dev_info, 0, sizeof(dev_info));
+	dev_info.core_id = conf->core_id;
+	dev_info.force_bind = conf->force_bind;
+	dev_info.group_id = conf->group_id;
+	dev_info.mbuf_size = conf->mbuf_size;
+	snprintf(dev_info.name, RTE_KDP_NAMESIZE, "%s", conf->name);
+
+	/* Physical addresses of the shared rings, for the kernel to map. */
+	dev_info.tx_phys = slot->m_tx_q->phys_addr;
+	dev_info.rx_phys = slot->m_rx_q->phys_addr;
+	dev_info.alloc_phys = slot->m_alloc_q->phys_addr;
+	dev_info.free_phys = slot->m_free_q->phys_addr;
+
+	/* MBUF mempool */
+	snprintf(mz_name, sizeof(mz_name), RTE_MEMPOOL_OBJ_NAME,
+		pktmbuf_pool->name);
+	mz = rte_memzone_lookup(mz_name);
+	KDP_MEM_CHECK(mz == NULL);
+	dev_info.mbuf_va = mz->addr;
+	dev_info.mbuf_phys = mz->phys_addr;
+
+	ret = ioctl(kdp_fd, RTE_KDP_IOCTL_CREATE, &dev_info);
+	KDP_MEM_CHECK(ret < 0);
+
+	kdp->in_use = 1;
+
+	/* Allocate mbufs and then put them into alloc_q */
+	kdp_allocate_mbufs(kdp);
+
+	return 0;
+
+kdp_fail:
+	return -1;
+}
+
+/* Return to the pool any mbufs the kernel side has finished with. */
+static void
+kdp_free_mbufs(struct rte_kdp *kdp)
+{
+	struct rte_mbuf *mbufs[MAX_MBUF_BURST_NUM];
+	int n, idx;
+
+	n = kdp_fifo_get(kdp->free_q, (void **)mbufs, MAX_MBUF_BURST_NUM);
+	for (idx = 0; idx < n; idx++)
+		rte_pktmbuf_free(mbufs[idx]);
+}
+
+/* Hand up to num packets to the kernel; returns how many were accepted. */
+unsigned
+rte_kdp_tx_burst(struct rte_kdp *kdp, struct rte_mbuf **mbufs, unsigned num)
+{
+	unsigned nb_put;
+
+	nb_put = kdp_fifo_put(kdp->rx_q, (void **)mbufs, num);
+
+	/* Get mbufs from free_q and then free them */
+	kdp_free_mbufs(kdp);
+
+	return nb_put;
+}
+
+/* Fetch up to num packets the kernel queued; returns how many arrived. */
+unsigned
+rte_kdp_rx_burst(struct rte_kdp *kdp, struct rte_mbuf **mbufs, unsigned num)
+{
+	unsigned nb_got;
+
+	nb_got = kdp_fifo_get(kdp->tx_q, (void **)mbufs, num);
+
+	/* Replenish alloc_q whenever buffers were consumed. */
+	if (nb_got != 0)
+		kdp_allocate_mbufs(kdp);
+
+	return nb_got;
+}
+
+/* Drain a ring one entry at a time, freeing every mbuf it holds. */
+static void
+kdp_free_fifo(struct rte_kdp_fifo *fifo)
+{
+	struct rte_mbuf *mbuf;
+
+	while (kdp_fifo_get(fifo, (void **)&mbuf, 1) != 0)
+		rte_pktmbuf_free(mbuf);
+}
+
+/*
+ * Destroy the kernel-side device and drain all shared rings.
+ * Returns 0 on success, -1 if kdp is NULL/unused or the ioctl fails.
+ */
+int
+rte_kdp_release(struct rte_kdp *kdp)
+{
+	struct rte_kdp_device_info dev_info;
+
+	if (!kdp || !kdp->in_use)
+		return -1;
+
+	/* Fix: only the name identifies the device on release; zero the
+	 * rest so no uninitialized stack bytes reach the kernel. */
+	memset(&dev_info, 0, sizeof(dev_info));
+	snprintf(dev_info.name, sizeof(dev_info.name), "%s", kdp->name);
+	if (ioctl(kdp_fd, RTE_KDP_IOCTL_RELEASE, &dev_info) < 0) {
+		RTE_LOG(ERR, KDP, "Fail to release kdp device\n");
+		return -1;
+	}
+
+	/* mbufs in all fifo should be released, except request/response */
+	kdp_free_fifo(kdp->tx_q);
+	kdp_free_fifo(kdp->rx_q);
+	kdp_free_fifo(kdp->alloc_q);
+	kdp_free_fifo(kdp->free_q);
+
+	/* NOTE(review): the slot is freed but the rte_kdp struct itself is
+	 * not; the PMD keeps its pointer and may restart on the zeroed
+	 * struct whose slot is then NULL -- confirm the restart path. */
+	rte_free(kdp->slot);
+
+	/* Memset the KDP struct */
+	memset(kdp, 0, sizeof(struct rte_kdp));
+
+	return 0;
+}
+
+/* Close the shared control fd, if it was ever opened. */
+void
+rte_kdp_close(void)
+{
+	if (kdp_fd >= 0) {
+		close(kdp_fd);
+		kdp_fd = -1;
+	}
+}
diff --git a/drivers/net/kdp/rte_kdp.h b/drivers/net/kdp/rte_kdp.h
new file mode 100644
index 0000000..28f959e
--- /dev/null
+++ b/drivers/net/kdp/rte_kdp.h
@@ -0,0 +1,113 @@ 
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_KDP_H_
+#define _RTE_KDP_H_
+
+#include <rte_memory.h>
+#include <rte_mempool.h>
+
+#include <exec-env/rte_kdp_common.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * KDP memzone pool slot
+ */
+struct rte_kdp_memzone_slot {
+	uint32_t id;
+
+	/* Memzones */
+	const struct rte_memzone *m_tx_q;      /**< TX queue */
+	const struct rte_memzone *m_rx_q;      /**< RX queue */
+	const struct rte_memzone *m_alloc_q;   /**< Allocated mbufs queue */
+	const struct rte_memzone *m_free_q;    /**< To be freed mbufs queue */
+};
+
+/**
+ * KDP context
+ */
+struct rte_kdp {
+	char name[RTE_KDP_NAMESIZE];        /**< KDP interface name */
+	struct rte_mempool *pktmbuf_pool;   /**< pkt mbuf mempool */
+	struct rte_kdp_memzone_slot *slot;
+	uint16_t group_id;                  /**< Group ID of KDP devices */
+
+	struct rte_kdp_fifo *tx_q;          /**< TX queue */
+	struct rte_kdp_fifo *rx_q;          /**< RX queue */
+	struct rte_kdp_fifo *alloc_q;       /**< Allocated mbufs queue */
+	struct rte_kdp_fifo *free_q;        /**< To be freed mbufs queue */
+
+	uint8_t in_use;                     /**< kdp in use */
+
+};
+
+/**
+ * Structure for configuring KDP device.
+ */
+struct rte_kdp_conf {
+	/*
+	 * KDP name which will be used in relevant network device.
+	 * Keep the name as short as possible, as it will be part of
+	 * the memzone name.
+	 */
+	char name[RTE_KDP_NAMESIZE];
+	uint32_t core_id;   /* Core ID to bind kernel thread on */
+	uint16_t group_id;  /* Group ID, passed to the kernel module */
+	unsigned mbuf_size; /* mbuf data size, passed to the kernel module */
+
+	uint8_t force_bind; /* Flag to bind kernel thread */
+};
+
+/** Allocate a KDP context and its shared memzones for the given port. */
+struct rte_kdp *rte_kdp_init(uint16_t port_id);
+
+/** Create the kernel-side device; returns 0 on success, -1 on failure. */
+int rte_kdp_start(struct rte_kdp *kdp, struct rte_mempool *pktmbuf_pool,
+	      const struct rte_kdp_conf *conf);
+
+/** Fetch packets the kernel queued; returns number of mbufs filled. */
+unsigned rte_kdp_rx_burst(struct rte_kdp *kdp,
+		struct rte_mbuf **mbufs, unsigned num);
+
+/** Hand packets to the kernel; returns number of mbufs accepted. */
+unsigned rte_kdp_tx_burst(struct rte_kdp *kdp,
+		struct rte_mbuf **mbufs, unsigned num);
+
+/** Destroy the kernel-side device and drain all FIFOs. */
+int rte_kdp_release(struct rte_kdp *kdp);
+
+/** Close the shared /dev/kdp file descriptor. */
+void rte_kdp_close(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_KDP_H_ */
diff --git a/drivers/net/kdp/rte_kdp_fifo.h b/drivers/net/kdp/rte_kdp_fifo.h
new file mode 100644
index 0000000..1a7e063
--- /dev/null
+++ b/drivers/net/kdp/rte_kdp_fifo.h
@@ -0,0 +1,91 @@ 
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * Initializes the kdp fifo structure.
+ *
+ * size must be a nonzero power of 2 because indices wrap with
+ * "& (len - 1)"; a zero size would make the mask all-ones and corrupt
+ * indexing, so it is rejected too.
+ */
+static inline void
+kdp_fifo_init(struct rte_kdp_fifo *fifo, unsigned size)
+{
+	/* Fix: also reject size 0, which passes the power-of-2 test.
+	 * Marked inline so unused-function warnings do not fire in every
+	 * translation unit including this header under WERROR_FLAGS. */
+	if (size == 0 || (size & (size - 1)))
+		rte_panic("KDP fifo size must be power of 2\n");
+
+	fifo->write = 0;
+	fifo->read = 0;
+	fifo->len = size;
+	fifo->elem_size = sizeof(void *);
+}
+
+/**
+ * Adds num elements into the fifo. Return the number actually written.
+ *
+ * Single-writer side of the ring: only this side advances fifo->write.
+ * One slot is deliberately left unused so that write == read means
+ * "empty" unambiguously (the loop stops before filling the last slot).
+ * NOTE(review): there is no memory barrier between storing the payload
+ * and publishing fifo->write -- confirm ordering guarantees against the
+ * kernel-side consumer.
+ */
+static inline unsigned
+kdp_fifo_put(struct rte_kdp_fifo *fifo, void **data, unsigned num)
+{
+	unsigned i = 0;
+	unsigned fifo_write = fifo->write;
+	unsigned fifo_read = fifo->read;
+	unsigned new_write = fifo_write;
+
+	for (i = 0; i < num; i++) {
+		/* len is a power of 2, so masking wraps the index */
+		new_write = (new_write + 1) & (fifo->len - 1);
+
+		if (new_write == fifo_read)
+			break;
+		fifo->buffer[fifo_write] = data[i];
+		fifo_write = new_write;
+	}
+	fifo->write = fifo_write;
+	return i;
+}
+
+/**
+ * Get up to num elements from the fifo. Return the number actually read.
+ *
+ * Single-reader side of the ring: only this side advances fifo->read.
+ * NOTE(review): fifo->write is read without a barrier before the payload
+ * loads -- confirm ordering guarantees against the kernel-side producer.
+ */
+static inline unsigned
+kdp_fifo_get(struct rte_kdp_fifo *fifo, void **data, unsigned num)
+{
+	unsigned i = 0;
+	unsigned new_read = fifo->read;
+	unsigned fifo_write = fifo->write;
+	for (i = 0; i < num; i++) {
+		/* read index catching up to write index means empty */
+		if (new_read == fifo_write)
+			break;
+
+		data[i] = fifo->buffer[new_read];
+		new_read = (new_read + 1) & (fifo->len - 1);
+	}
+	fifo->read = new_read;
+	return i;
+}
diff --git a/drivers/net/kdp/rte_pmd_kdp_version.map b/drivers/net/kdp/rte_pmd_kdp_version.map
new file mode 100644
index 0000000..0812bb1
--- /dev/null
+++ b/drivers/net/kdp/rte_pmd_kdp_version.map
@@ -0,0 +1,4 @@ 
+DPDK_2.3 {
+
+	local: *;
+};
diff --git a/lib/librte_eal/common/include/rte_log.h b/lib/librte_eal/common/include/rte_log.h
index 2e47e7f..5a0048b 100644
--- a/lib/librte_eal/common/include/rte_log.h
+++ b/lib/librte_eal/common/include/rte_log.h
@@ -1,7 +1,7 @@ 
 /*-
  *   BSD LICENSE
  *
- *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
  *   All rights reserved.
  *
  *   Redistribution and use in source and binary forms, with or without
@@ -79,6 +79,7 @@  extern struct rte_logs rte_logs;
 #define RTE_LOGTYPE_PIPELINE 0x00008000 /**< Log related to pipeline. */
 #define RTE_LOGTYPE_MBUF    0x00010000 /**< Log related to mbuf. */
 #define RTE_LOGTYPE_CRYPTODEV 0x00020000 /**< Log related to cryptodev. */
+#define RTE_LOGTYPE_KDP     0x00080000 /**< Log related to KDP. */
 
 /* these log types can be used in an application */
 #define RTE_LOGTYPE_USER1   0x01000000 /**< User-defined log type 1. */
diff --git a/mk/rte.app.mk b/mk/rte.app.mk
index 8ecab41..eb18972 100644
--- a/mk/rte.app.mk
+++ b/mk/rte.app.mk
@@ -1,6 +1,6 @@ 
 #   BSD LICENSE
 #
-#   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+#   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 #   Copyright(c) 2014-2015 6WIND S.A.
 #   All rights reserved.
 #
@@ -154,6 +154,7 @@  _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_PCAP)       += -lrte_pmd_pcap
 _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET)  += -lrte_pmd_af_packet
 _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_NULL)       += -lrte_pmd_null
 _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_QAT)        += -lrte_pmd_qat
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_KDP)        += -lrte_pmd_kdp
 
 # AESNI MULTI BUFFER is dependent on the IPSec_MB library
 _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB)   += -lrte_pmd_aesni_mb