get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
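
The exchange captured below was made against this endpoint through the browsable interface. As a rough illustration only (not part of the Patchwork page itself; it assumes the public patches.dpdk.org instance is reachable and uses the third-party requests package), the same record can be requested as plain JSON:

import requests

# Ask for JSON instead of the browsable "api" renderer shown below.
resp = requests.get("http://patches.dpdk.org/api/patches/131462/",
                    params={"format": "json"})
resp.raise_for_status()
patch = resp.json()
# e.g. "[v4,5/9] net/cpfl: add fxp rule module" / "superseded"
print(patch["name"], patch["state"])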

GET /api/patches/131462/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 131462,
    "url": "http://patches.dpdk.org/api/patches/131462/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20230815165050.86595-6-yuying.zhang@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20230815165050.86595-6-yuying.zhang@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20230815165050.86595-6-yuying.zhang@intel.com",
    "date": "2023-08-15T16:50:46",
    "name": "[v4,5/9] net/cpfl: add fxp rule module",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "fe2aee3487160743b637f4afe0df322bdca0de8f",
    "submitter": {
        "id": 1844,
        "url": "http://patches.dpdk.org/api/people/1844/?format=api",
        "name": "Zhang, Yuying",
        "email": "yuying.zhang@intel.com"
    },
    "delegate": {
        "id": 319,
        "url": "http://patches.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20230815165050.86595-6-yuying.zhang@intel.com/mbox/",
    "series": [
        {
            "id": 29518,
            "url": "http://patches.dpdk.org/api/series/29518/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=29518",
            "date": "2023-08-15T16:50:41",
            "name": "add rte flow support for cpfl",
            "version": 4,
            "mbox": "http://patches.dpdk.org/series/29518/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/131462/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/131462/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id A2B2F425A3;\n\tFri, 15 Sep 2023 10:48:03 +0200 (CEST)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 716F340A71;\n\tFri, 15 Sep 2023 10:47:49 +0200 (CEST)",
            "from mgamail.intel.com (mgamail.intel.com [134.134.136.31])\n by mails.dpdk.org (Postfix) with ESMTP id 180AF4067A\n for <dev@dpdk.org>; Fri, 15 Sep 2023 10:47:43 +0200 (CEST)",
            "from orsmga007.jf.intel.com ([10.7.209.58])\n by orsmga104.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 15 Sep 2023 01:47:36 -0700",
            "from dpdk-pengyuan-mev.sh.intel.com ([10.67.119.128])\n by orsmga007.jf.intel.com with ESMTP; 15 Sep 2023 01:47:33 -0700"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/simple;\n d=intel.com; i=@intel.com; q=dns/txt; s=Intel;\n t=1694767664; x=1726303664;\n h=from:to:cc:subject:date:message-id:in-reply-to:\n references:mime-version:content-transfer-encoding;\n bh=TYL1szFOFLSZwHLs02/rPSN5JtEnX7R1lYyQurM3D5E=;\n b=Et8DEPJVBtpDdiuX8DK6MsqhnuqKS2obmUfrheobHPv6b6i3AAvB7Jaa\n t3ZUib+2IJQ0w0QcJkwQEr1IM3O8dbe+ADeplrw9eMmi8N34/h3GH2ZeC\n uenCD3j/ar/IPN/RsB9c8Ypq+3XiPJtuYQuEpyMAZt8xaiah0jhfRNWpm\n Y33ePdGj0OkXZP3JyA4pcvIaN66cWLgClECAVrxOlmQI34DOqtTtKi0z3\n +nuAH3FKuNRDKBUxl8cohe72HYqR1byS9R/hvjgok0zPxU2kjF3hVLWXz\n cNd7m1ykI4WmiUK0uXtNVJT6X/T7Z6B7QxIz25//ljSRu/5BT3d0IaBGt A==;",
        "X-IronPort-AV": [
            "E=McAfee;i=\"6600,9927,10833\"; a=\"443265008\"",
            "E=Sophos;i=\"6.02,148,1688454000\"; d=\"scan'208\";a=\"443265008\"",
            "E=McAfee;i=\"6600,9927,10833\"; a=\"738254400\"",
            "E=Sophos;i=\"6.02,148,1688454000\"; d=\"scan'208\";a=\"738254400\""
        ],
        "X-ExtLoop1": "1",
        "From": "\"Zhang, Yuying\" <yuying.zhang@intel.com>",
        "To": "yuying.zhang@intel.com, dev@dpdk.org, qi.z.zhang@intel.com,\n beilei.xing@intel.com, jingjing.wu@intel.com",
        "Cc": "mingxia.liu@intel.com",
        "Subject": "[PATCH v4 5/9] net/cpfl: add fxp rule module",
        "Date": "Tue, 15 Aug 2023 16:50:46 +0000",
        "Message-Id": "<20230815165050.86595-6-yuying.zhang@intel.com>",
        "X-Mailer": "git-send-email 2.25.1",
        "In-Reply-To": "<20230815165050.86595-1-yuying.zhang@intel.com>",
        "References": "<20230906093407.3635038-1-wenjing.qiao@intel.com>\n <20230815165050.86595-1-yuying.zhang@intel.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "From: Yuying Zhang <yuying.zhang@intel.com>\n\nAdded low level fxp module for rule packing / creation / destroying.\n\nSigned-off-by: Yuying Zhang <yuying.zhang@intel.com>\n---\n drivers/net/cpfl/cpfl_controlq.c | 424 +++++++++++++++++++++++++++++++\n drivers/net/cpfl/cpfl_controlq.h |  24 ++\n drivers/net/cpfl/cpfl_ethdev.c   |  31 +++\n drivers/net/cpfl/cpfl_ethdev.h   |   6 +\n drivers/net/cpfl/cpfl_fxp_rule.c | 296 +++++++++++++++++++++\n drivers/net/cpfl/cpfl_fxp_rule.h |  68 +++++\n drivers/net/cpfl/meson.build     |   1 +\n 7 files changed, 850 insertions(+)\n create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.c\n create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.h",
    "diff": "diff --git a/drivers/net/cpfl/cpfl_controlq.c b/drivers/net/cpfl/cpfl_controlq.c\nindex 476c78f235..ed76282b0c 100644\n--- a/drivers/net/cpfl/cpfl_controlq.c\n+++ b/drivers/net/cpfl/cpfl_controlq.c\n@@ -331,6 +331,402 @@ cpfl_ctlq_add(struct idpf_hw *hw, struct cpfl_ctlq_create_info *qinfo,\n \treturn status;\n }\n \n+/**\n+ * cpfl_ctlq_send - send command to Control Queue (CTQ)\n+ * @hw: pointer to hw struct\n+ * @cq: handle to control queue struct to send on\n+ * @num_q_msg: number of messages to send on control queue\n+ * @q_msg: pointer to array of queue messages to be sent\n+ *\n+ * The caller is expected to allocate DMAable buffers and pass them to the\n+ * send routine via the q_msg struct / control queue specific data struct.\n+ * The control queue will hold a reference to each send message until\n+ * the completion for that message has been cleaned.\n+ */\n+int\n+cpfl_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,\n+\t       uint16_t num_q_msg, struct idpf_ctlq_msg q_msg[])\n+{\n+\tstruct idpf_ctlq_desc *desc;\n+\tint num_desc_avail = 0;\n+\tint status = 0;\n+\tint i = 0;\n+\n+\tif (!cq || !cq->ring_size)\n+\t\treturn -ENOBUFS;\n+\n+\tidpf_acquire_lock(&cq->cq_lock);\n+\n+\t/* Ensure there are enough descriptors to send all messages */\n+\tnum_desc_avail = IDPF_CTLQ_DESC_UNUSED(cq);\n+\tif (num_desc_avail == 0 || num_desc_avail < num_q_msg) {\n+\t\tstatus = -ENOSPC;\n+\t\tgoto sq_send_command_out;\n+\t}\n+\n+\tfor (i = 0; i < num_q_msg; i++) {\n+\t\tstruct idpf_ctlq_msg *msg = &q_msg[i];\n+\t\tuint64_t msg_cookie;\n+\n+\t\tdesc = IDPF_CTLQ_DESC(cq, cq->next_to_use);\n+\t\tdesc->opcode = CPU_TO_LE16(msg->opcode);\n+\t\tdesc->pfid_vfid = CPU_TO_LE16(msg->func_id);\n+\t\tmsg_cookie = *(uint64_t *)&msg->cookie;\n+\t\tdesc->cookie_high =\n+\t\t\tCPU_TO_LE32(IDPF_HI_DWORD(msg_cookie));\n+\t\tdesc->cookie_low =\n+\t\t\tCPU_TO_LE32(IDPF_LO_DWORD(msg_cookie));\n+\t\tdesc->flags = CPU_TO_LE16((msg->host_id & IDPF_HOST_ID_MASK) <<\n+\t\t\t\tIDPF_CTLQ_FLAG_HOST_ID_S);\n+\t\tif (msg->data_len) {\n+\t\t\tstruct idpf_dma_mem *buff = msg->ctx.indirect.payload;\n+\n+\t\t\tdesc->datalen |= CPU_TO_LE16(msg->data_len);\n+\t\t\tdesc->flags |= CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF);\n+\t\t\tdesc->flags |= CPU_TO_LE16(IDPF_CTLQ_FLAG_RD);\n+\t\t\t/* Update the address values in the desc with the pa\n+\t\t\t * value for respective buffer\n+\t\t\t */\n+\t\t\tdesc->params.indirect.addr_high =\n+\t\t\t\tCPU_TO_LE32(IDPF_HI_DWORD(buff->pa));\n+\t\t\tdesc->params.indirect.addr_low =\n+\t\t\t\tCPU_TO_LE32(IDPF_LO_DWORD(buff->pa));\n+\t\t\tidpf_memcpy(&desc->params, msg->ctx.indirect.context,\n+\t\t\t\t    IDPF_INDIRECT_CTX_SIZE, IDPF_NONDMA_TO_DMA);\n+\t\t} else {\n+\t\t\tidpf_memcpy(&desc->params, msg->ctx.direct,\n+\t\t\t\t    IDPF_DIRECT_CTX_SIZE, IDPF_NONDMA_TO_DMA);\n+\t\t}\n+\n+\t\t/* Store buffer info */\n+\t\tcq->bi.tx_msg[cq->next_to_use] = msg;\n+\t\t(cq->next_to_use)++;\n+\t\tif (cq->next_to_use == cq->ring_size)\n+\t\t\tcq->next_to_use = 0;\n+\t}\n+\n+\t/* Force memory write to complete before letting hardware\n+\t * know that there are new descriptors to fetch.\n+\t */\n+\tidpf_wmb();\n+\twr32(hw, cq->reg.tail, cq->next_to_use);\n+\n+sq_send_command_out:\n+\tidpf_release_lock(&cq->cq_lock);\n+\n+\treturn status;\n+}\n+\n+/**\n+ * __cpfl_ctlq_clean_sq - helper function to reclaim descriptors on HW write\n+ * back for the requested queue\n+ * @cq: pointer to the specific Control queue\n+ * @clean_count: (input|output) number of descriptors to clean as input, and\n+ * number of 
descriptors actually cleaned as output\n+ * @msg_status: (output) pointer to msg pointer array to be populated; needs\n+ * to be allocated by caller\n+ * @force: (input) clean descriptors which were not done yet. Use with caution\n+ * in kernel mode only\n+ *\n+ * Returns an array of message pointers associated with the cleaned\n+ * descriptors. The pointers are to the original ctlq_msgs sent on the cleaned\n+ * descriptors.  The status will be returned for each; any messages that failed\n+ * to send will have a non-zero status. The caller is expected to free original\n+ * ctlq_msgs and free or reuse the DMA buffers.\n+ */\n+static int\n+__cpfl_ctlq_clean_sq(struct idpf_ctlq_info *cq, uint16_t *clean_count,\n+\t\t     struct idpf_ctlq_msg *msg_status[], bool force)\n+{\n+\tstruct idpf_ctlq_desc *desc;\n+\tuint16_t i = 0, num_to_clean;\n+\tuint16_t ntc, desc_err;\n+\tint ret = 0;\n+\n+\tif (!cq || !cq->ring_size)\n+\t\treturn -ENOBUFS;\n+\n+\tif (*clean_count == 0)\n+\t\treturn 0;\n+\tif (*clean_count > cq->ring_size)\n+\t\treturn -EINVAL;\n+\n+\tidpf_acquire_lock(&cq->cq_lock);\n+\tntc = cq->next_to_clean;\n+\tnum_to_clean = *clean_count;\n+\n+\tfor (i = 0; i < num_to_clean; i++) {\n+\t\t/* Fetch next descriptor and check if marked as done */\n+\t\tdesc = IDPF_CTLQ_DESC(cq, ntc);\n+\t\tif (!force && !(LE16_TO_CPU(desc->flags) & IDPF_CTLQ_FLAG_DD))\n+\t\t\tbreak;\n+\n+\t\tdesc_err = LE16_TO_CPU(desc->ret_val);\n+\t\tif (desc_err) {\n+\t\t\t/* strip off FW internal code */\n+\t\t\tdesc_err &= 0xff;\n+\t\t}\n+\n+\t\tmsg_status[i] = cq->bi.tx_msg[ntc];\n+\t\tif (!msg_status[i])\n+\t\t\tbreak;\n+\t\tmsg_status[i]->status = desc_err;\n+\t\tcq->bi.tx_msg[ntc] = NULL;\n+\t\t/* Zero out any stale data */\n+\t\tidpf_memset(desc, 0, sizeof(*desc), IDPF_DMA_MEM);\n+\t\tntc++;\n+\t\tif (ntc == cq->ring_size)\n+\t\t\tntc = 0;\n+\t}\n+\n+\tcq->next_to_clean = ntc;\n+\tidpf_release_lock(&cq->cq_lock);\n+\n+\t/* Return number of descriptors actually cleaned */\n+\t*clean_count = i;\n+\n+\treturn ret;\n+}\n+\n+/**\n+ * cpfl_ctlq_clean_sq - reclaim send descriptors on HW write back for the\n+ * requested queue\n+ * @cq: pointer to the specific Control queue\n+ * @clean_count: (input|output) number of descriptors to clean as input, and\n+ * number of descriptors actually cleaned as output\n+ * @msg_status: (output) pointer to msg pointer array to be populated; needs\n+ * to be allocated by caller\n+ *\n+ * Returns an array of message pointers associated with the cleaned\n+ * descriptors. The pointers are to the original ctlq_msgs sent on the cleaned\n+ * descriptors.  The status will be returned for each; any messages that failed\n+ * to send will have a non-zero status. 
The caller is expected to free original\n+ * ctlq_msgs and free or reuse the DMA buffers.\n+ */\n+int\n+cpfl_ctlq_clean_sq(struct idpf_ctlq_info *cq, uint16_t *clean_count,\n+\t\t   struct idpf_ctlq_msg *msg_status[])\n+{\n+\treturn __cpfl_ctlq_clean_sq(cq, clean_count, msg_status, false);\n+}\n+\n+/**\n+ * cpfl_ctlq_post_rx_buffs - post buffers to descriptor ring\n+ * @hw: pointer to hw struct\n+ * @cq: pointer to control queue handle\n+ * @buff_count: (input|output) input is number of buffers caller is trying to\n+ * return; output is number of buffers that were not posted\n+ * @buffs: array of pointers to dma mem structs to be given to hardware\n+ *\n+ * Caller uses this function to return DMA buffers to the descriptor ring after\n+ * consuming them; buff_count will be the number of buffers.\n+ *\n+ * Note: this function needs to be called after a receive call even\n+ * if there are no DMA buffers to be returned, i.e. buff_count = 0,\n+ * buffs = NULL to support direct commands\n+ */\n+int\n+cpfl_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,\n+\t\t\tuint16_t *buff_count, struct idpf_dma_mem **buffs)\n+{\n+\tstruct idpf_ctlq_desc *desc;\n+\tuint16_t ntp = cq->next_to_post;\n+\tbool buffs_avail = false;\n+\tuint16_t tbp = ntp + 1;\n+\tint status = 0;\n+\tint i = 0;\n+\n+\tif (*buff_count > cq->ring_size)\n+\t\treturn -EINVAL;\n+\n+\tif (*buff_count > 0)\n+\t\tbuffs_avail = true;\n+\tidpf_acquire_lock(&cq->cq_lock);\n+\tif (tbp >= cq->ring_size)\n+\t\ttbp = 0;\n+\n+\tif (tbp == cq->next_to_clean)\n+\t\t/* Nothing to do */\n+\t\tgoto post_buffs_out;\n+\n+\t/* Post buffers for as many as provided or up until the last one used */\n+\twhile (ntp != cq->next_to_clean) {\n+\t\tdesc = IDPF_CTLQ_DESC(cq, ntp);\n+\t\tif (cq->bi.rx_buff[ntp])\n+\t\t\tgoto fill_desc;\n+\t\tif (!buffs_avail) {\n+\t\t\t/* If the caller hasn't given us any buffers or\n+\t\t\t * there are none left, search the ring itself\n+\t\t\t * for an available buffer to move to this\n+\t\t\t * entry starting at the next entry in the ring\n+\t\t\t */\n+\t\t\ttbp = ntp + 1;\n+\t\t\t/* Wrap ring if necessary */\n+\t\t\tif (tbp >= cq->ring_size)\n+\t\t\t\ttbp = 0;\n+\n+\t\t\twhile (tbp != cq->next_to_clean) {\n+\t\t\t\tif (cq->bi.rx_buff[tbp]) {\n+\t\t\t\t\tcq->bi.rx_buff[ntp] =\n+\t\t\t\t\t\tcq->bi.rx_buff[tbp];\n+\t\t\t\t\tcq->bi.rx_buff[tbp] = NULL;\n+\n+\t\t\t\t\t/* Found a buffer, no need to\n+\t\t\t\t\t * search anymore\n+\t\t\t\t\t */\n+\t\t\t\t\tbreak;\n+\t\t\t\t}\n+\n+\t\t\t\t/* Wrap ring if necessary */\n+\t\t\t\ttbp++;\n+\t\t\t\tif (tbp >= cq->ring_size)\n+\t\t\t\t\ttbp = 0;\n+\t\t\t}\n+\n+\t\t\tif (tbp == cq->next_to_clean)\n+\t\t\t\tgoto post_buffs_out;\n+\t\t} else {\n+\t\t\t/* Give back pointer to DMA buffer */\n+\t\t\tcq->bi.rx_buff[ntp] = buffs[i];\n+\t\t\ti++;\n+\n+\t\t\tif (i >= *buff_count)\n+\t\t\t\tbuffs_avail = false;\n+\t\t}\n+\n+fill_desc:\n+\t\tdesc->flags =\n+\t\t\tCPU_TO_LE16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);\n+\n+\t\t/* Post buffers to descriptor */\n+\t\tdesc->datalen = CPU_TO_LE16(cq->bi.rx_buff[ntp]->size);\n+\t\tdesc->params.indirect.addr_high =\n+\t\t\tCPU_TO_LE32(IDPF_HI_DWORD(cq->bi.rx_buff[ntp]->pa));\n+\t\tdesc->params.indirect.addr_low =\n+\t\t\tCPU_TO_LE32(IDPF_LO_DWORD(cq->bi.rx_buff[ntp]->pa));\n+\n+\t\tntp++;\n+\t\tif (ntp == cq->ring_size)\n+\t\t\tntp = 0;\n+\t}\n+\n+post_buffs_out:\n+\t/* Only update tail if buffers were actually posted */\n+\tif (cq->next_to_post != ntp) {\n+\t\tif (ntp)\n+\t\t\t/* Update next_to_post to ntp - 1 since current ntp\n+\t\t\t * will 
not have a buffer\n+\t\t\t */\n+\t\t\tcq->next_to_post = ntp - 1;\n+\t\telse\n+\t\t\t/* Wrap to end of end ring since current ntp is 0 */\n+\t\t\tcq->next_to_post = cq->ring_size - 1;\n+\n+\t\twr32(hw, cq->reg.tail, cq->next_to_post);\n+\t}\n+\n+\tidpf_release_lock(&cq->cq_lock);\n+\t/* return the number of buffers that were not posted */\n+\t*buff_count = *buff_count - i;\n+\n+\treturn status;\n+}\n+\n+/**\n+ * cpfl_ctlq_recv - receive control queue message call back\n+ * @cq: pointer to control queue handle to receive on\n+ * @num_q_msg: (input|output) input number of messages that should be received;\n+ * output number of messages actually received\n+ * @q_msg: (output) array of received control queue messages on this q;\n+ * needs to be pre-allocated by caller for as many messages as requested\n+ *\n+ * Called by interrupt handler or polling mechanism. Caller is expected\n+ * to free buffers\n+ */\n+int\n+cpfl_ctlq_recv(struct idpf_ctlq_info *cq, uint16_t *num_q_msg,\n+\t       struct idpf_ctlq_msg *q_msg)\n+{\n+\tuint16_t num_to_clean, ntc, ret_val, flags;\n+\tstruct idpf_ctlq_desc *desc;\n+\tint ret_code = 0;\n+\tuint16_t i = 0;\n+\n+\tif (!cq || !cq->ring_size)\n+\t\treturn -ENOBUFS;\n+\n+\tif (*num_q_msg == 0)\n+\t\treturn 0;\n+\telse if (*num_q_msg > cq->ring_size)\n+\t\treturn -EINVAL;\n+\n+\t/* take the lock before we start messing with the ring */\n+\tidpf_acquire_lock(&cq->cq_lock);\n+\tntc = cq->next_to_clean;\n+\tnum_to_clean = *num_q_msg;\n+\n+\tfor (i = 0; i < num_to_clean; i++) {\n+\t\t/* Fetch next descriptor and check if marked as done */\n+\t\tdesc = IDPF_CTLQ_DESC(cq, ntc);\n+\t\tflags = LE16_TO_CPU(desc->flags);\n+\t\tif (!(flags & IDPF_CTLQ_FLAG_DD))\n+\t\t\tbreak;\n+\n+\t\tret_val = LE16_TO_CPU(desc->ret_val);\n+\t\tq_msg[i].vmvf_type = (flags &\n+\t\t\t\t     (IDPF_CTLQ_FLAG_FTYPE_VM |\n+\t\t\t\t      IDPF_CTLQ_FLAG_FTYPE_PF)) >>\n+\t\t\t\t      IDPF_CTLQ_FLAG_FTYPE_S;\n+\n+\t\tif (flags & IDPF_CTLQ_FLAG_ERR)\n+\t\t\tret_code = -EBADMSG;\n+\n+\t\tq_msg[i].cookie.mbx.chnl_opcode = LE32_TO_CPU(desc->cookie_high);\n+\t\tq_msg[i].cookie.mbx.chnl_retval = LE32_TO_CPU(desc->cookie_low);\n+\t\tq_msg[i].opcode = LE16_TO_CPU(desc->opcode);\n+\t\tq_msg[i].data_len = LE16_TO_CPU(desc->datalen);\n+\t\tq_msg[i].status = ret_val;\n+\n+\t\tif (desc->datalen) {\n+\t\t\tidpf_memcpy(q_msg[i].ctx.indirect.context,\n+\t\t\t\t    &desc->params.indirect,\n+\t\t\t\t    IDPF_INDIRECT_CTX_SIZE,\n+\t\t\t\t    IDPF_DMA_TO_NONDMA);\n+\n+\t\t\t/* Assign pointer to dma buffer to ctlq_msg array\n+\t\t\t * to be given to upper layer\n+\t\t\t */\n+\t\t\tq_msg[i].ctx.indirect.payload = cq->bi.rx_buff[ntc];\n+\n+\t\t\t/* Zero out pointer to DMA buffer info;\n+\t\t\t * will be repopulated by post buffers API\n+\t\t\t */\n+\t\t\tcq->bi.rx_buff[ntc] = NULL;\n+\t\t} else {\n+\t\t\tidpf_memcpy(q_msg[i].ctx.direct,\n+\t\t\t\t    desc->params.raw,\n+\t\t\t\t    IDPF_DIRECT_CTX_SIZE,\n+\t\t\t\t    IDPF_DMA_TO_NONDMA);\n+\t\t}\n+\n+\t\t/* Zero out stale data in descriptor */\n+\t\tidpf_memset(desc, 0, sizeof(struct idpf_ctlq_desc),\n+\t\t\t    IDPF_DMA_MEM);\n+\n+\t\tntc++;\n+\t\tif (ntc == cq->ring_size)\n+\t\t\tntc = 0;\n+\t};\n+\n+\tcq->next_to_clean = ntc;\n+\tidpf_release_lock(&cq->cq_lock);\n+\t*num_q_msg = i;\n+\tif (*num_q_msg == 0)\n+\t\tret_code = -ENOMSG;\n+\n+\treturn ret_code;\n+}\n+\n int\n cpfl_vport_ctlq_add(struct idpf_hw *hw, struct cpfl_ctlq_create_info *qinfo,\n \t\t    struct idpf_ctlq_info **cq)\n@@ -377,3 +773,31 @@ cpfl_vport_ctlq_remove(struct idpf_hw *hw, struct idpf_ctlq_info 
*cq)\n {\n \tcpfl_ctlq_remove(hw, cq);\n }\n+\n+int\n+cpfl_vport_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,\n+\t\t     uint16_t num_q_msg, struct idpf_ctlq_msg q_msg[])\n+{\n+\treturn cpfl_ctlq_send(hw, cq, num_q_msg, q_msg);\n+}\n+\n+int\n+cpfl_vport_ctlq_recv(struct idpf_ctlq_info *cq, uint16_t *num_q_msg,\n+\t\t     struct idpf_ctlq_msg q_msg[])\n+{\n+\treturn cpfl_ctlq_recv(cq, num_q_msg, q_msg);\n+}\n+\n+int\n+cpfl_vport_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,\n+\t\t\t      uint16_t *buff_count, struct idpf_dma_mem **buffs)\n+{\n+\treturn cpfl_ctlq_post_rx_buffs(hw, cq, buff_count, buffs);\n+}\n+\n+int\n+cpfl_vport_ctlq_clean_sq(struct idpf_ctlq_info *cq, uint16_t *clean_count,\n+\t\t\t struct idpf_ctlq_msg *msg_status[])\n+{\n+\treturn cpfl_ctlq_clean_sq(cq, clean_count, msg_status);\n+}\ndiff --git a/drivers/net/cpfl/cpfl_controlq.h b/drivers/net/cpfl/cpfl_controlq.h\nindex 930d717f63..740ae6522c 100644\n--- a/drivers/net/cpfl/cpfl_controlq.h\n+++ b/drivers/net/cpfl/cpfl_controlq.h\n@@ -14,6 +14,13 @@\n #define CPFL_DFLT_MBX_RING_LEN\t\t512\n #define CPFL_CFGQ_RING_LEN\t\t512\n \n+/* CRQ/CSQ specific error codes */\n+#define CPFL_ERR_CTLQ_ERROR             -74     /* -EBADMSG */\n+#define CPFL_ERR_CTLQ_TIMEOUT           -110    /* -ETIMEDOUT */\n+#define CPFL_ERR_CTLQ_FULL              -28     /* -ENOSPC */\n+#define CPFL_ERR_CTLQ_NO_WORK           -42     /* -ENOMSG */\n+#define CPFL_ERR_CTLQ_EMPTY             -105    /* -ENOBUFS */\n+\n /* Generic queue info structures */\n /* MB, CONFIG and EVENT q do not have extended info */\n struct cpfl_ctlq_create_info {\n@@ -44,8 +51,25 @@ int cpfl_ctlq_alloc_ring_res(struct idpf_hw *hw,\n int cpfl_ctlq_add(struct idpf_hw *hw,\n \t\t  struct cpfl_ctlq_create_info *qinfo,\n \t\t  struct idpf_ctlq_info **cq);\n+int cpfl_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,\n+\t\t   u16 num_q_msg, struct idpf_ctlq_msg q_msg[]);\n+int cpfl_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,\n+\t\t       struct idpf_ctlq_msg *msg_status[]);\n+int cpfl_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,\n+\t\t\t    u16 *buff_count, struct idpf_dma_mem **buffs);\n+int cpfl_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,\n+\t\t   struct idpf_ctlq_msg *q_msg);\n int cpfl_vport_ctlq_add(struct idpf_hw *hw,\n \t\t\tstruct cpfl_ctlq_create_info *qinfo,\n \t\t\tstruct idpf_ctlq_info **cq);\n void cpfl_vport_ctlq_remove(struct idpf_hw *hw, struct idpf_ctlq_info *cq);\n+int cpfl_vport_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,\n+\t\t\t u16 num_q_msg, struct idpf_ctlq_msg q_msg[]);\n+int cpfl_vport_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,\n+\t\t\t struct idpf_ctlq_msg q_msg[]);\n+\n+int cpfl_vport_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,\n+\t\t\t\t  u16 *buff_count, struct idpf_dma_mem **buffs);\n+int cpfl_vport_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,\n+\t\t\t     struct idpf_ctlq_msg *msg_status[]);\n #endif\ndiff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c\nindex 88e2ecf754..cb407e66af 100644\n--- a/drivers/net/cpfl/cpfl_ethdev.c\n+++ b/drivers/net/cpfl/cpfl_ethdev.c\n@@ -16,6 +16,7 @@\n #include <ethdev_private.h>\n #include \"cpfl_rxtx.h\"\n #include \"cpfl_flow.h\"\n+#include \"cpfl_rules.h\"\n \n #define CPFL_REPRESENTOR\t\"representor\"\n #define CPFL_TX_SINGLE_Q\t\"tx_single\"\n@@ -1127,6 +1128,7 @@ cpfl_dev_close(struct rte_eth_dev *dev)\n \tadapter->cur_vport_nb--;\n 
\tdev->data->dev_private = NULL;\n \tadapter->vports[vport->sw_idx] = NULL;\n+\tidpf_free_dma_mem(NULL, &cpfl_vport->itf.flow_dma);\n \trte_free(cpfl_vport);\n \n \treturn 0;\n@@ -2466,6 +2468,26 @@ cpfl_p2p_queue_info_init(struct cpfl_vport *cpfl_vport,\n \treturn 0;\n }\n \n+int\n+cpfl_alloc_dma_mem_batch(struct idpf_dma_mem *orig_dma, struct idpf_dma_mem *dma, uint32_t size,\n+\t\t\t int batch_size)\n+{\n+\tint i;\n+\n+\tif (!idpf_alloc_dma_mem(NULL, orig_dma, size * (1 + batch_size))) {\n+\t\tPMD_INIT_LOG(ERR, \"Could not alloc dma memory\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\tfor (i = 0; i < batch_size; i++) {\n+\t\tdma[i].va = (void *)((uint64_t)orig_dma->va + size * (i + 1));\n+\t\tdma[i].pa = orig_dma->pa + size * (i + 1);\n+\t\tdma[i].size = size;\n+\t\tdma[i].zone = NULL;\n+\t}\n+\treturn 0;\n+}\n+\n static int\n cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)\n {\n@@ -2515,6 +2537,15 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)\n \trte_ether_addr_copy((struct rte_ether_addr *)vport->default_mac_addr,\n \t\t\t    &dev->data->mac_addrs[0]);\n \n+\tmemset(cpfl_vport->itf.dma, 0, sizeof(cpfl_vport->itf.dma));\n+\tmemset(cpfl_vport->itf.msg, 0, sizeof(cpfl_vport->itf.msg));\n+\tret = cpfl_alloc_dma_mem_batch(&cpfl_vport->itf.flow_dma,\n+\t\t\t\t       cpfl_vport->itf.dma,\n+\t\t\t\t       sizeof(union cpfl_rule_cfg_pkt_record),\n+\t\t\t\t       CPFL_FLOW_BATCH_SIZE);\n+\tif (ret < 0)\n+\t\tgoto err_mac_addrs;\n+\n \tif (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq) {\n \t\tmemset(&p2p_queue_grps_info, 0, sizeof(p2p_queue_grps_info));\n \t\tret = cpfl_p2p_q_grps_add(vport, &p2p_queue_grps_info, p2p_q_vc_out_info);\ndiff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h\nindex 7f83d170d7..8eeeac9910 100644\n--- a/drivers/net/cpfl/cpfl_ethdev.h\n+++ b/drivers/net/cpfl/cpfl_ethdev.h\n@@ -147,10 +147,14 @@ enum cpfl_itf_type {\n \n TAILQ_HEAD(cpfl_flow_list, rte_flow);\n \n+#define CPFL_FLOW_BATCH_SIZE  490\n struct cpfl_itf {\n \tenum cpfl_itf_type type;\n \tstruct cpfl_adapter_ext *adapter;\n \tstruct cpfl_flow_list flow_list;\n+\tstruct idpf_dma_mem flow_dma;\n+\tstruct idpf_dma_mem dma[CPFL_FLOW_BATCH_SIZE];\n+\tstruct idpf_ctlq_msg msg[CPFL_FLOW_BATCH_SIZE];\n \tvoid *data;\n };\n \n@@ -240,6 +244,8 @@ int cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,\n int cpfl_vc_create_ctrl_vport(struct cpfl_adapter_ext *adapter);\n int cpfl_config_ctlq_rx(struct cpfl_adapter_ext *adapter);\n int cpfl_config_ctlq_tx(struct cpfl_adapter_ext *adapter);\n+int cpfl_alloc_dma_mem_batch(struct idpf_dma_mem *orig_dma, struct idpf_dma_mem *dma,\n+\t\t\t     uint32_t size, int batch_size);\n \n #define CPFL_DEV_TO_PCI(eth_dev)\t\t\\\n \tRTE_DEV_TO_PCI((eth_dev)->device)\ndiff --git a/drivers/net/cpfl/cpfl_fxp_rule.c b/drivers/net/cpfl/cpfl_fxp_rule.c\nnew file mode 100644\nindex 0000000000..50fac55432\n--- /dev/null\n+++ b/drivers/net/cpfl/cpfl_fxp_rule.c\n@@ -0,0 +1,296 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2023 Intel Corporation\n+ */\n+#include \"cpfl_ethdev.h\"\n+\n+#include \"cpfl_fxp_rule.h\"\n+#include \"cpfl_logs.h\"\n+\n+#define CTLQ_SEND_RETRIES 100\n+#define CTLQ_RECEIVE_RETRIES 100\n+\n+int\n+cpfl_send_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_msg,\n+\t\t   struct idpf_ctlq_msg q_msg[])\n+{\n+\tstruct idpf_ctlq_msg **msg_ptr_list;\n+\tu16 clean_count = 0;\n+\tint num_cleaned = 0;\n+\tint retries = 0;\n+\tint ret = 0;\n+\n+\tmsg_ptr_list = calloc(num_q_msg, 
sizeof(struct idpf_ctlq_msg *));\n+\tif (!msg_ptr_list) {\n+\t\tPMD_INIT_LOG(ERR, \"no memory for cleaning ctlq\");\n+\t\tret = -ENOMEM;\n+\t\tgoto err;\n+\t}\n+\n+\tret = cpfl_vport_ctlq_send(hw, cq, num_q_msg, q_msg);\n+\tif (ret) {\n+\t\tPMD_INIT_LOG(ERR, \"cpfl_vport_ctlq_send() failed with error: 0x%4x\", ret);\n+\t\tgoto send_err;\n+\t}\n+\n+\twhile (retries <= CTLQ_SEND_RETRIES) {\n+\t\tclean_count = num_q_msg - num_cleaned;\n+\t\tret = cpfl_vport_ctlq_clean_sq(cq, &clean_count,\n+\t\t\t\t\t       &msg_ptr_list[num_cleaned]);\n+\t\tif (ret) {\n+\t\t\tPMD_INIT_LOG(ERR, \"clean ctlq failed: 0x%4x\", ret);\n+\t\t\tgoto send_err;\n+\t\t}\n+\n+\t\tnum_cleaned += clean_count;\n+\t\tretries++;\n+\t\tif (num_cleaned >= num_q_msg)\n+\t\t\tbreak;\n+\t\trte_delay_us_sleep(10);\n+\t}\n+\n+\tif (retries > CTLQ_SEND_RETRIES) {\n+\t\tPMD_INIT_LOG(ERR, \"timed out while polling for completions\");\n+\t\tret = -1;\n+\t\tgoto send_err;\n+\t}\n+\n+send_err:\n+\tif (msg_ptr_list)\n+\t\tfree(msg_ptr_list);\n+err:\n+\treturn ret;\n+}\n+\n+static int\n+cpfl_process_rx_ctlq_msg(u16 num_q_msg, struct idpf_ctlq_msg *q_msg)\n+{\n+\tu16 i;\n+\n+\tif (!num_q_msg || !q_msg)\n+\t\treturn -EINVAL;\n+\n+\tfor (i = 0; i < num_q_msg; i++) {\n+\t\tif (q_msg[i].status == CPFL_CFG_PKT_ERR_OK) {\n+\t\t\tcontinue;\n+\t\t} else if (q_msg[i].status == CPFL_CFG_PKT_ERR_EEXIST &&\n+\t\t\t   q_msg[i].opcode == cpfl_ctlq_sem_add_rule) {\n+\t\t\tPMD_INIT_LOG(ERR, \"The rule has confliction with already existed one\");\n+\t\t\treturn -EINVAL;\n+\t\t} else if (q_msg[i].status == CPFL_CFG_PKT_ERR_ENOTFND &&\n+\t\t\t   q_msg[i].opcode == cpfl_ctlq_sem_del_rule) {\n+\t\t\tPMD_INIT_LOG(ERR, \"The rule has already deleted\");\n+\t\t\treturn -EINVAL;\n+\t\t} else {\n+\t\t\tPMD_INIT_LOG(ERR, \"Invalid rule\");\n+\t\t\treturn -EINVAL;\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+\n+int\n+cpfl_receive_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_msg,\n+\t\t      struct idpf_ctlq_msg q_msg[])\n+{\n+\tint retries = 0;\n+\tstruct idpf_dma_mem *dma;\n+\tu16 i;\n+\tuint16_t buff_cnt;\n+\tint ret = 0, handle_rule = 0;\n+\n+\tretries = 0;\n+\twhile (retries <= CTLQ_RECEIVE_RETRIES) {\n+\t\trte_delay_us_sleep(10);\n+\t\tret = cpfl_vport_ctlq_recv(cq, &num_q_msg, &q_msg[0]);\n+\n+\t\tif (ret && ret != CPFL_ERR_CTLQ_NO_WORK &&\n+\t\t    ret != CPFL_ERR_CTLQ_ERROR) {\n+\t\t\tPMD_INIT_LOG(ERR, \"failed to recv ctrlq msg. err: 0x%4x\\n\", ret);\n+\t\t\tretries++;\n+\t\t\tcontinue;\n+\t\t}\n+\n+\t\tif (ret == CPFL_ERR_CTLQ_NO_WORK) {\n+\t\t\tretries++;\n+\t\t\tcontinue;\n+\t\t}\n+\n+\t\tif (ret == CPFL_ERR_CTLQ_EMPTY)\n+\t\t\tbreak;\n+\n+\t\tret = cpfl_process_rx_ctlq_msg(num_q_msg, q_msg);\n+\t\tif (ret) {\n+\t\t\tPMD_INIT_LOG(ERR, \"failed to process rx_ctrlq msg\");\n+\t\t\thandle_rule = ret;\n+\t\t}\n+\n+\t\tfor (i = 0; i < num_q_msg; i++) {\n+\t\t\tif (q_msg[i].data_len > 0)\n+\t\t\t\tdma = q_msg[i].ctx.indirect.payload;\n+\t\t\telse\n+\t\t\t\tdma = NULL;\n+\n+\t\t\tbuff_cnt = dma ? 
1 : 0;\n+\t\t\tret = cpfl_vport_ctlq_post_rx_buffs(hw, cq, &buff_cnt, &dma);\n+\t\t\tif (ret)\n+\t\t\t\tPMD_INIT_LOG(WARNING, \"could not posted recv bufs\\n\");\n+\t\t}\n+\t\tbreak;\n+\t}\n+\n+\tif (retries > CTLQ_RECEIVE_RETRIES) {\n+\t\tPMD_INIT_LOG(ERR, \"timed out while polling for receive response\");\n+\t\tret = -1;\n+\t}\n+\n+\treturn ret + handle_rule;\n+}\n+\n+static int\n+cpfl_mod_rule_pack(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,\n+\t\t   struct idpf_ctlq_msg *msg)\n+{\n+\tstruct cpfl_mod_rule_info *minfo = &rinfo->mod;\n+\tunion cpfl_rule_cfg_pkt_record *blob = NULL;\n+\tstruct cpfl_rule_cfg_data cfg = {0};\n+\n+\t/* prepare rule blob */\n+\tif (!dma->va) {\n+\t\tPMD_INIT_LOG(ERR, \"dma mem passed to %s is null\\n\", __func__);\n+\t\treturn -1;\n+\t}\n+\tblob = (union cpfl_rule_cfg_pkt_record *)dma->va;\n+\tmemset(blob, 0, sizeof(*blob));\n+\tmemset(&cfg, 0, sizeof(cfg));\n+\n+\t/* fill info for both query and add/update */\n+\tcpfl_fill_rule_mod_content(minfo->mod_obj_size,\n+\t\t\t\t   minfo->pin_mod_content,\n+\t\t\t\t   minfo->mod_index,\n+\t\t\t\t   &cfg.ext.mod_content);\n+\n+\t/* only fill content for add/update */\n+\tmemcpy(blob->mod_blob, minfo->mod_content,\n+\t       minfo->mod_content_byte_len);\n+\n+#define NO_HOST_NEEDED 0\n+\t/* pack message */\n+\tcpfl_fill_rule_cfg_data_common(cpfl_ctlq_mod_add_update_rule,\n+\t\t\t\t       rinfo->cookie,\n+\t\t\t\t       0, /* vsi_id not used for mod */\n+\t\t\t\t       rinfo->port_num,\n+\t\t\t\t       NO_HOST_NEEDED,\n+\t\t\t\t       0, /* time_sel */\n+\t\t\t\t       0, /* time_sel_val */\n+\t\t\t\t       0, /* cache_wr_thru */\n+\t\t\t\t       rinfo->resp_req,\n+\t\t\t\t       (u16)sizeof(*blob),\n+\t\t\t\t       (void *)dma,\n+\t\t\t\t       &cfg.common);\n+\tcpfl_prep_rule_desc(&cfg, msg);\n+\treturn 0;\n+}\n+\n+static int\n+cpfl_default_rule_pack(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,\n+\t\t       struct idpf_ctlq_msg *msg, bool add)\n+{\n+\tunion cpfl_rule_cfg_pkt_record *blob = NULL;\n+\tenum cpfl_ctlq_rule_cfg_opc opc;\n+\tstruct cpfl_rule_cfg_data cfg;\n+\tuint16_t cfg_ctrl;\n+\n+\tif (!dma->va) {\n+\t\tPMD_INIT_LOG(ERR, \"dma mem passed to %s is null\\n\", __func__);\n+\t\treturn -1;\n+\t}\n+\tblob = (union cpfl_rule_cfg_pkt_record *)dma->va;\n+\tmemset(blob, 0, sizeof(*blob));\n+\tmemset(msg, 0, sizeof(*msg));\n+\n+\tif (rinfo->type == CPFL_RULE_TYPE_SEM) {\n+\t\tcfg_ctrl = CPFL_GET_MEV_SEM_RULE_CFG_CTRL(rinfo->sem.prof_id,\n+\t\t\t\t\t\t\t  rinfo->sem.sub_prof_id,\n+\t\t\t\t\t\t\t  rinfo->sem.pin_to_cache,\n+\t\t\t\t\t\t\t  rinfo->sem.fixed_fetch);\n+\t\tcpfl_prep_sem_rule_blob(rinfo->sem.key, rinfo->sem.key_byte_len,\n+\t\t\t\t\trinfo->act_bytes, rinfo->act_byte_len,\n+\t\t\t\t\tcfg_ctrl, blob);\n+\t\topc = add ? 
cpfl_ctlq_sem_add_rule : cpfl_ctlq_sem_del_rule;\n+\t} else {\n+\t\tPMD_INIT_LOG(ERR, \"not support %d rule.\", rinfo->type);\n+\t\treturn -1;\n+\t}\n+\n+\tcpfl_fill_rule_cfg_data_common(opc,\n+\t\t\t\t       rinfo->cookie,\n+\t\t\t\t       rinfo->vsi,\n+\t\t\t\t       rinfo->port_num,\n+\t\t\t\t       rinfo->host_id,\n+\t\t\t\t       0, /* time_sel */\n+\t\t\t\t       0, /* time_sel_val */\n+\t\t\t\t       0, /* cache_wr_thru */\n+\t\t\t\t       rinfo->resp_req,\n+\t\t\t\t       sizeof(union cpfl_rule_cfg_pkt_record),\n+\t\t\t\t       dma,\n+\t\t\t\t       &cfg.common);\n+\tcpfl_prep_rule_desc(&cfg, msg);\n+\treturn 0;\n+}\n+\n+static int\n+cpfl_rule_pack(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,\n+\t       struct idpf_ctlq_msg *msg, bool add)\n+{\n+\tint ret = 0;\n+\n+\tif (rinfo->type == CPFL_RULE_TYPE_SEM) {\n+\t\tif (cpfl_default_rule_pack(rinfo, dma, msg, add) < 0)\n+\t\t\tret = -1;\n+\t} else if (rinfo->type == CPFL_RULE_TYPE_MOD) {\n+\t\tif (cpfl_mod_rule_pack(rinfo, dma, msg) < 0)\n+\t\t\tret = -1;\n+\t} else {\n+\t\tPMD_INIT_LOG(ERR, \"Invalid type of rule\");\n+\t\tret = -1;\n+\t}\n+\n+\treturn ret;\n+}\n+\n+int\n+cpfl_rule_process(struct cpfl_itf *itf,\n+\t\t  struct idpf_ctlq_info *tx_cq,\n+\t\t  struct idpf_ctlq_info *rx_cq,\n+\t\t  struct cpfl_rule_info *rinfo,\n+\t\t  int rule_num,\n+\t\t  bool add)\n+{\n+\tstruct idpf_hw *hw = &itf->adapter->base.hw;\n+\tint i;\n+\tint ret = 0;\n+\n+\tif (rule_num == 0)\n+\t\treturn 0;\n+\n+\tfor (i = 0; i < rule_num; i++) {\n+\t\tret = cpfl_rule_pack(&rinfo[i], &itf->dma[i], &itf->msg[i], add);\n+\t\tif (ret) {\n+\t\t\tPMD_INIT_LOG(ERR, \"Could not pack rule\");\n+\t\t\treturn ret;\n+\t\t}\n+\t}\n+\tret = cpfl_send_ctlq_msg(hw, tx_cq, rule_num, itf->msg);\n+\tif (ret) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to send control message\");\n+\t\treturn ret;\n+\t}\n+\tret = cpfl_receive_ctlq_msg(hw, rx_cq, rule_num, itf->msg);\n+\tif (ret) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to update rule\");\n+\t\treturn ret;\n+\t}\n+\n+\treturn 0;\n+}\ndiff --git a/drivers/net/cpfl/cpfl_fxp_rule.h b/drivers/net/cpfl/cpfl_fxp_rule.h\nnew file mode 100644\nindex 0000000000..ed757b80b1\n--- /dev/null\n+++ b/drivers/net/cpfl/cpfl_fxp_rule.h\n@@ -0,0 +1,68 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2023 Intel Corporation\n+ */\n+\n+#ifndef _CPFL_FXP_RULE_H_\n+#define _CPFL_FXP_RULE_H_\n+\n+#include \"cpfl_rules.h\"\n+\n+#define CPFL_MAX_KEY_LEN 128\n+#define CPFL_MAX_RULE_ACTIONS 32\n+\n+struct cpfl_sem_rule_info {\n+\tuint16_t prof_id;\n+\tuint8_t sub_prof_id;\n+\tuint8_t key[CPFL_MAX_KEY_LEN];\n+\tuint8_t key_byte_len;\n+\tuint8_t pin_to_cache;\n+\tuint8_t fixed_fetch;\n+};\n+\n+#define CPFL_MAX_MOD_CONTENT_LEN 256\n+struct cpfl_mod_rule_info {\n+\tuint8_t mod_content[CPFL_MAX_MOD_CONTENT_LEN];\n+\tuint8_t mod_content_byte_len;\n+\tuint32_t mod_index;\n+\tuint8_t pin_mod_content;\n+\tuint8_t mod_obj_size;\n+};\n+\n+enum cpfl_rule_type {\n+\tCPFL_RULE_TYPE_NONE,\n+\tCPFL_RULE_TYPE_SEM,\n+\tCPFL_RULE_TYPE_MOD\n+};\n+\n+struct cpfl_rule_info {\n+\tenum cpfl_rule_type type;\n+\tuint64_t cookie;\n+\tuint8_t host_id;\n+\tuint8_t port_num;\n+\tuint8_t resp_req;\n+\t/* TODO: change this to be dynamically allocated/reallocated */\n+\tuint8_t act_bytes[CPFL_MAX_RULE_ACTIONS * sizeof(union cpfl_action_set)];\n+\tuint8_t act_byte_len;\n+\t/* vsi is used for lem and lpm rules */\n+\tuint16_t vsi;\n+\tuint8_t clear_mirror_1st_state;\n+\t/* mod related fields */\n+\tunion {\n+\t\tstruct cpfl_mod_rule_info mod;\n+\t\tstruct cpfl_sem_rule_info 
sem;\n+\t};\n+};\n+\n+extern struct cpfl_vport_ext *vport;\n+\n+int cpfl_rule_process(struct cpfl_itf *itf,\n+\t\t      struct idpf_ctlq_info *tx_cq,\n+\t\t      struct idpf_ctlq_info *rx_cq,\n+\t\t      struct cpfl_rule_info *rinfo,\n+\t\t      int rule_num,\n+\t\t      bool add);\n+int cpfl_send_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_msg,\n+\t\t       struct idpf_ctlq_msg q_msg[]);\n+int cpfl_receive_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_msg,\n+\t\t\t  struct idpf_ctlq_msg q_msg[]);\n+#endif /*CPFL_FXP_RULE_H*/\ndiff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build\nindex 53eb5aecad..a06265e6d5 100644\n--- a/drivers/net/cpfl/meson.build\n+++ b/drivers/net/cpfl/meson.build\n@@ -49,6 +49,7 @@ if js_dep.found()\n         sources += files(\n \t\t'cpfl_flow.c',\n                 'cpfl_flow_parser.c',\n+\t\t'cpfl_fxp_rule.c',\n         )\n         dpdk_conf.set('CPFL_FLOW_JSON_SUPPORT', true)\n         ext_deps += js_dep\n",
    "prefixes": [
        "v4",
        "5/9"
    ]
}
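
The "mbox" field on the patch, and on each entry of "series", is a direct download URL for the raw message, suitable for use with git am. A minimal sketch of saving the patch mbox (again an illustration under the same assumptions as above, not part of the page; the output filename is arbitrary):

import requests

patch = requests.get("http://patches.dpdk.org/api/patches/131462/",
                     params={"format": "json"}).json()

# "mbox" points at the raw message; write it out so it can be applied locally.
reply = requests.get(patch["mbox"])
reply.raise_for_status()
with open("net-cpfl-add-fxp-rule-module.mbox", "wb") as f:
    f.write(reply.content)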