get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
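
For example, the patch record shown below can be retrieved with any HTTP client. A minimal sketch in Python using the requests library (an assumption; any JSON-capable HTTP client works). Without the ?format=api suffix the endpoint returns plain JSON to programmatic clients:

    import requests

    # Fetch the patch detail as JSON. The ?format=api suffix selects the
    # browsable rendering; programmatic clients can simply omit it.
    resp = requests.get("http://patches.dpdk.org/api/patches/77518/")
    resp.raise_for_status()
    patch = resp.json()

    print(patch["name"])   # "[11/22] event/dlb2: add port setup"
    print(patch["state"])  # "superseded"
    print(patch["mbox"])   # raw mbox URL, suitable for piping into git am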

GET /api/patches/77518/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 77518,
    "url": "http://patches.dpdk.org/api/patches/77518/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1599855987-25976-12-git-send-email-timothy.mcdaniel@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1599855987-25976-12-git-send-email-timothy.mcdaniel@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1599855987-25976-12-git-send-email-timothy.mcdaniel@intel.com",
    "date": "2020-09-11T20:26:16",
    "name": "[11/22] event/dlb2: add port setup",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "07c816c6fc3391c3e9adcfcd2564ccc62f3e97fc",
    "submitter": {
        "id": 826,
        "url": "http://patches.dpdk.org/api/people/826/?format=api",
        "name": "Timothy McDaniel",
        "email": "timothy.mcdaniel@intel.com"
    },
    "delegate": {
        "id": 310,
        "url": "http://patches.dpdk.org/api/users/310/?format=api",
        "username": "jerin",
        "first_name": "Jerin",
        "last_name": "Jacob",
        "email": "jerinj@marvell.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1599855987-25976-12-git-send-email-timothy.mcdaniel@intel.com/mbox/",
    "series": [
        {
            "id": 12164,
            "url": "http://patches.dpdk.org/api/series/12164/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=12164",
            "date": "2020-09-11T20:26:05",
            "name": "Add DLB2 PMD",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/12164/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/77518/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/77518/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 24CD4A04C1;\n\tFri, 11 Sep 2020 22:32:17 +0200 (CEST)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 8A0391C1FD;\n\tFri, 11 Sep 2020 22:30:34 +0200 (CEST)",
            "from mga04.intel.com (mga04.intel.com [192.55.52.120])\n by dpdk.org (Postfix) with ESMTP id DD1281C126\n for <dev@dpdk.org>; Fri, 11 Sep 2020 22:30:01 +0200 (CEST)",
            "from orsmga005.jf.intel.com ([10.7.209.41])\n by fmsmga104.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 11 Sep 2020 13:30:01 -0700",
            "from txasoft-yocto.an.intel.com ([10.123.72.192])\n by orsmga005.jf.intel.com with ESMTP; 11 Sep 2020 13:30:00 -0700"
        ],
        "IronPort-SDR": [
            "\n /c002EFnGJfx0WFe1qJRTgPBpHRuN0z6V70SFlBzyMel2ys1YhPyIdOjpDhUq1MAFFodxEU4hQ\n D6poMEroz5rQ==",
            "\n dEYgTjnDSNjUixVutq95dzPuOw5Isje7M2FQPNDwkZ6MVTFrIxmwWglW3B+eVGZ/ffJTk3PIsg\n 3FX43f+pEf6g=="
        ],
        "X-IronPort-AV": [
            "E=McAfee;i=\"6000,8403,9741\"; a=\"156244358\"",
            "E=Sophos;i=\"5.76,417,1592895600\"; d=\"scan'208\";a=\"156244358\"",
            "E=Sophos;i=\"5.76,417,1592895600\"; d=\"scan'208\";a=\"481453571\""
        ],
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "From": "Timothy McDaniel <timothy.mcdaniel@intel.com>",
        "To": "",
        "Cc": "dev@dpdk.org, erik.g.carrillo@intel.com, gage.eads@intel.com,\n harry.van.haaren@intel.com, jerinj@marvell.com",
        "Date": "Fri, 11 Sep 2020 15:26:16 -0500",
        "Message-Id": "<1599855987-25976-12-git-send-email-timothy.mcdaniel@intel.com>",
        "X-Mailer": "git-send-email 1.7.10",
        "In-Reply-To": "<1599855987-25976-1-git-send-email-timothy.mcdaniel@intel.com>",
        "References": "<1599855987-25976-1-git-send-email-timothy.mcdaniel@intel.com>",
        "Subject": "[dpdk-dev] [PATCH 11/22] event/dlb2: add port setup",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Configure the load balanded (ldb) or directed (dir) port.\nThe consumer queue (CQ) and producer port (PP) are also\nset up here.\n\nSigned-off-by: Timothy McDaniel <timothy.mcdaniel@intel.com>\n---\n drivers/event/dlb2/dlb2.c                  | 527 +++++++++++++++++\n drivers/event/dlb2/dlb2_iface.c            |   9 +\n drivers/event/dlb2/dlb2_iface.h            |   8 +\n drivers/event/dlb2/pf/base/dlb2_resource.c | 921 +++++++++++++++++++++++++++++\n drivers/event/dlb2/pf/dlb2_main.c          |  28 +\n drivers/event/dlb2/pf/dlb2_pf.c            | 179 ++++++\n 6 files changed, 1672 insertions(+)",
    "diff": "diff --git a/drivers/event/dlb2/dlb2.c b/drivers/event/dlb2/dlb2.c\nindex 366e194..a4c8833 100644\n--- a/drivers/event/dlb2/dlb2.c\n+++ b/drivers/event/dlb2/dlb2.c\n@@ -1043,6 +1043,532 @@ dlb2_eventdev_queue_setup(struct rte_eventdev *dev,\n \treturn ret;\n }\n \n+static int\n+dlb2_init_consume_qe(struct dlb2_port *qm_port, char *mz_name)\n+{\n+\tstruct dlb2_cq_pop_qe *qe;\n+\n+\tqe = rte_malloc(mz_name,\n+\t\t\tDLB2_NUM_QES_PER_CACHE_LINE *\n+\t\t\t\tsizeof(struct dlb2_cq_pop_qe),\n+\t\t\tRTE_CACHE_LINE_SIZE);\n+\n+\tif (qe == NULL)\t{\n+\t\tDLB2_LOG_ERR(\"dlb2: no memory for consume_qe\\n\");\n+\t\treturn -ENOMEM;\n+\t}\n+\tqm_port->consume_qe = qe;\n+\n+\tmemset(qe, 0, DLB2_NUM_QES_PER_CACHE_LINE *\n+\t       sizeof(struct dlb2_cq_pop_qe));\n+\n+\tqe->qe_valid = 0;\n+\tqe->qe_frag = 0;\n+\tqe->qe_comp = 0;\n+\tqe->cq_token = 1;\n+\t/* Tokens value is 0-based; i.e. '0' returns 1 token, '1' returns 2,\n+\t * and so on.\n+\t */\n+\tqe->tokens = 0;\t/* set at run time */\n+\tqe->meas_lat = 0;\n+\tqe->no_dec = 0;\n+\t/* Completion IDs are disabled */\n+\tqe->cmp_id = 0;\n+\n+\treturn 0;\n+}\n+\n+static int\n+dlb2_init_int_arm_qe(struct dlb2_port *qm_port, char *mz_name)\n+{\n+\tstruct dlb2_enqueue_qe *qe;\n+\n+\tqe = rte_malloc(mz_name,\n+\t\t\tDLB2_NUM_QES_PER_CACHE_LINE *\n+\t\t\t\tsizeof(struct dlb2_enqueue_qe),\n+\t\t\tRTE_CACHE_LINE_SIZE);\n+\n+\tif (qe == NULL) {\n+\t\tDLB2_LOG_ERR(\"dlb2: no memory for complete_qe\\n\");\n+\t\treturn -ENOMEM;\n+\t}\n+\tqm_port->int_arm_qe = qe;\n+\n+\tmemset(qe, 0, DLB2_NUM_QES_PER_CACHE_LINE *\n+\t       sizeof(struct dlb2_enqueue_qe));\n+\n+\t/* V2 - INT ARM is CQ_TOKEN + FRAG */\n+\tqe->qe_valid = 0;\n+\tqe->qe_frag = 1;\n+\tqe->qe_comp = 0;\n+\tqe->cq_token = 1;\n+\tqe->meas_lat = 0;\n+\tqe->no_dec = 0;\n+\t/* Completion IDs are disabled */\n+\tqe->cmp_id = 0;\n+\n+\treturn 0;\n+}\n+\n+static int\n+dlb2_init_qe_mem(struct dlb2_port *qm_port, char *mz_name)\n+{\n+\tint ret, sz;\n+\n+\tsz = DLB2_NUM_QES_PER_CACHE_LINE * sizeof(struct dlb2_enqueue_qe);\n+\n+\tqm_port->qe4 = rte_malloc(mz_name, sz, RTE_CACHE_LINE_SIZE);\n+\n+\tif (qm_port->qe4 == NULL) {\n+\t\tDLB2_LOG_ERR(\"dlb2: no qe4 memory\\n\");\n+\t\tret = -ENOMEM;\n+\t\tgoto error_exit;\n+\t}\n+\n+\tmemset(qm_port->qe4, 0, sz);\n+\n+\tret = dlb2_init_int_arm_qe(qm_port, mz_name);\n+\tif (ret < 0) {\n+\t\tDLB2_LOG_ERR(\"dlb2: dlb2_init_int_arm_qe ret=%d\\n\",\n+\t\t\t     ret);\n+\t\tgoto error_exit;\n+\t}\n+\n+\tret = dlb2_init_consume_qe(qm_port, mz_name);\n+\tif (ret < 0) {\n+\t\tDLB2_LOG_ERR(\"dlb2: dlb2_init_consume_qe ret=%d\\n\",\n+\t\t\t     ret);\n+\t\tgoto error_exit;\n+\t}\n+\n+\treturn 0;\n+\n+error_exit:\n+\n+\tdlb2_free_qe_mem(qm_port);\n+\n+\treturn ret;\n+}\n+\n+static int\n+dlb2_hw_create_ldb_port(struct dlb2_eventdev *dlb2,\n+\t\t\tstruct dlb2_eventdev_port *ev_port,\n+\t\t\tuint32_t dequeue_depth,\n+\t\t\tuint32_t enqueue_depth)\n+{\n+\tstruct dlb2_hw_dev *handle = &dlb2->qm_instance;\n+\tstruct dlb2_create_ldb_port_args cfg = {0};\n+\tint ret;\n+\tstruct dlb2_port *qm_port = NULL;\n+\tchar mz_name[RTE_MEMZONE_NAMESIZE];\n+\tuint32_t qm_port_id;\n+\tuint16_t ldb_credit_high_watermark;\n+\tuint16_t dir_credit_high_watermark;\n+\n+\tif (handle == NULL)\n+\t\treturn -EINVAL;\n+\n+\tif (dequeue_depth < DLB2_MIN_CQ_DEPTH ||\n+\t    dequeue_depth > DLB2_MAX_INPUT_QUEUE_DEPTH) {\n+\t\tDLB2_LOG_ERR(\"dlb2: invalid dequeue_depth, must be %d-%d\\n\",\n+\t\t\t     DLB2_MIN_CQ_DEPTH, DLB2_MAX_INPUT_QUEUE_DEPTH);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif (enqueue_depth < 
DLB2_MIN_ENQUEUE_DEPTH) {\n+\t\tDLB2_LOG_ERR(\"dlb2: invalid enqueue_depth, must be at least %d\\n\",\n+\t\t\t     DLB2_MIN_ENQUEUE_DEPTH);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\trte_spinlock_lock(&handle->resource_lock);\n+\n+\t/* TODO - additional parameter validation */\n+\t/* We round up to the next power of 2 if necessary */\n+\tcfg.cq_depth = rte_align32pow2(dequeue_depth);\n+\tcfg.cq_depth_threshold = 1;\n+\n+\tcfg.cq_history_list_size = DLB2_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT;\n+\n+\tif (handle->cos_id == DLB2_COS_DEFAULT)\n+\t\tcfg.cos_id = 0;\n+\telse\n+\t\tcfg.cos_id = handle->cos_id;\n+\n+\tcfg.cos_strict = 0;\n+\n+\t/* User controls the LDB high watermark via enqueue depth. The DIR high\n+\t * watermark is equal, unless the directed credit pool is too small.\n+\t */\n+\tldb_credit_high_watermark = enqueue_depth;\n+\n+\t/* If there are no directed ports, the kernel driver will ignore this\n+\t * port's directed credit settings. Don't use enqueue_depth if it would\n+\t * require more directed credits than are available.\n+\t */\n+\tdir_credit_high_watermark =\n+\t\tRTE_MIN(enqueue_depth,\n+\t\t\thandle->cfg.num_dir_credits / dlb2->num_ports);\n+\n+\t/* Per QM values */\n+\n+\t/* DEBUG\n+\t * DLB2_LOG_ERR(\"create ldb port - grp=%d, devId=%d\\n\",\n+\t * handle->cfg.domain_id, handle->device_id);\n+\t */\n+\n+\tret = dlb2_iface_ldb_port_create(handle, &cfg,  dlb2->poll_mode);\n+\tif (ret < 0) {\n+\t\tDLB2_LOG_ERR(\"dlb2: dlb2_ldb_port_create error, ret=%d (driver status: %s)\\n\",\n+\t\t\t     ret, dlb2_error_strings[cfg.response.status]);\n+\t\tgoto error_exit;\n+\t}\n+\n+\tqm_port_id = cfg.response.id;\n+\n+\tDLB2_LOG_DBG(\"dlb2: ev_port %d uses qm LB port %d <<<<<\\n\",\n+\t\t     ev_port->id, qm_port_id);\n+\n+\tqm_port = &ev_port->qm_port;\n+\tqm_port->ev_port = ev_port; /* back ptr */\n+\tqm_port->dlb2 = dlb2; /* back ptr */\n+\t/*\n+\t * Allocate and init local qe struct(s).\n+\t * Note: MOVDIR64 requires the enqueue QE (qe4) to be aligned.\n+\t */\n+\n+\tsnprintf(mz_name, sizeof(mz_name), \"%s_ldb_port%d\",\n+\t\t handle->device_name,\n+\t\t ev_port->id);\n+\n+\tret = dlb2_init_qe_mem(qm_port, mz_name);\n+\tif (ret < 0) {\n+\t\tDLB2_LOG_ERR(\"dlb2: init_qe_mem failed, ret=%d\\n\", ret);\n+\t\tgoto error_exit;\n+\t}\n+\n+\tqm_port->id = qm_port_id;\n+\n+\tqm_port->cached_ldb_credits = 0;\n+\tqm_port->cached_dir_credits = 0;\n+\t/* CQs with depth < 8 use an 8-entry queue, but withhold credits so\n+\t * the effective depth is smaller.\n+\t */\n+\tqm_port->cq_depth = cfg.cq_depth <= 8 ? 8 : cfg.cq_depth;\n+\tqm_port->cq_idx = 0;\n+\tqm_port->cq_idx_unmasked = 0;\n+\n+\tif (dlb2->poll_mode == DLB2_CQ_POLL_MODE_SPARSE)\n+\t\tqm_port->cq_depth_mask = (qm_port->cq_depth * 4) - 1;\n+\telse\n+\t\tqm_port->cq_depth_mask = qm_port->cq_depth - 1;\n+\n+\tqm_port->gen_bit_shift = __builtin_popcount(qm_port->cq_depth_mask);\n+\t/* starting value of gen bit - it toggles at wrap time */\n+\tqm_port->gen_bit = 1;\n+\n+\tqm_port->int_armed = false;\n+\n+\t/* Save off for later use in info and lookup APIs. */\n+\tqm_port->qid_mappings = &dlb2->qm_ldb_to_ev_queue_id[0];\n+\n+\tqm_port->dequeue_depth = dequeue_depth;\n+\tqm_port->token_pop_thresh = dequeue_depth;\n+\n+\tqm_port->owed_tokens = 0;\n+\tqm_port->issued_releases = 0;\n+\n+\t/* Save config message too. 
*/\n+\trte_memcpy(&qm_port->cfg.ldb, &cfg, sizeof(cfg));\n+\n+\t/* update state */\n+\tqm_port->state = PORT_STARTED; /* enabled at create time */\n+\tqm_port->config_state = DLB2_CONFIGURED;\n+\n+\tqm_port->dir_credits = dir_credit_high_watermark;\n+\tqm_port->ldb_credits = ldb_credit_high_watermark;\n+\tqm_port->credit_pool[DLB2_DIR_QUEUE] = &dlb2->dir_credit_pool;\n+\tqm_port->credit_pool[DLB2_LDB_QUEUE] = &dlb2->ldb_credit_pool;\n+\n+\tDLB2_LOG_DBG(\"dlb2: created ldb port %d, depth = %d, ldb credits=%d, dir credits=%d\\n\",\n+\t\t     qm_port_id,\n+\t\t     dequeue_depth,\n+\t\t     qm_port->ldb_credits,\n+\t\t     qm_port->dir_credits);\n+\n+\trte_spinlock_unlock(&handle->resource_lock);\n+\n+\treturn 0;\n+\n+error_exit:\n+\n+\tif (qm_port)\n+\t\tdlb2_free_qe_mem(qm_port);\n+\n+\trte_spinlock_unlock(&handle->resource_lock);\n+\n+\tDLB2_LOG_ERR(\"dlb2: create ldb port failed!\\n\");\n+\n+\treturn ret;\n+}\n+\n+static void\n+dlb2_port_link_teardown(struct dlb2_eventdev *dlb2,\n+\t\t\tstruct dlb2_eventdev_port *ev_port)\n+{\n+\tstruct dlb2_eventdev_queue *ev_queue;\n+\tint i;\n+\n+\tfor (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {\n+\t\tif (!ev_port->link[i].valid)\n+\t\t\tcontinue;\n+\n+\t\tev_queue = &dlb2->ev_queues[ev_port->link[i].queue_id];\n+\n+\t\tev_port->link[i].valid = false;\n+\t\tev_port->num_links--;\n+\t\tev_queue->num_links--;\n+\t}\n+}\n+\n+static int\n+dlb2_hw_create_dir_port(struct dlb2_eventdev *dlb2,\n+\t\t\tstruct dlb2_eventdev_port *ev_port,\n+\t\t\tuint32_t dequeue_depth,\n+\t\t\tuint32_t enqueue_depth)\n+{\n+\tstruct dlb2_hw_dev *handle = &dlb2->qm_instance;\n+\tstruct dlb2_create_dir_port_args cfg = {0};\n+\tint ret;\n+\tstruct dlb2_port *qm_port = NULL;\n+\tchar mz_name[RTE_MEMZONE_NAMESIZE];\n+\tuint32_t qm_port_id;\n+\tuint16_t ldb_credit_high_watermark;\n+\tuint16_t dir_credit_high_watermark;\n+\n+\tif (dlb2 == NULL || handle == NULL)\n+\t\treturn -EINVAL;\n+\n+\tif (dequeue_depth < DLB2_MIN_CQ_DEPTH ||\n+\t    dequeue_depth > DLB2_MAX_INPUT_QUEUE_DEPTH) {\n+\t\tDLB2_LOG_ERR(\"dlb2: invalid dequeue_depth, must be %d-%d\\n\",\n+\t\t\t     DLB2_MIN_CQ_DEPTH, DLB2_MAX_INPUT_QUEUE_DEPTH);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\trte_spinlock_lock(&handle->resource_lock);\n+\n+\t/* Directed queues are configured at link time. */\n+\tcfg.queue_id = -1;\n+\n+\t/* We round up to the next power of 2 if necessary */\n+\tcfg.cq_depth = rte_align32pow2(dequeue_depth);\n+\tcfg.cq_depth_threshold = 1;\n+\n+\t/* User controls the LDB high watermark via enqueue depth. 
The DIR high\n+\t * watermark is equal, unless the directed credit pool is too small.\n+\t */\n+\tldb_credit_high_watermark = enqueue_depth;\n+\n+\t/* Don't use enqueue_depth if it would require more directed credits\n+\t * than are available.\n+\t */\n+\tdir_credit_high_watermark =\n+\t\tRTE_MIN(enqueue_depth,\n+\t\t\thandle->cfg.num_dir_credits / dlb2->num_ports);\n+\n+\t/* Per QM values */\n+\n+\tret = dlb2_iface_dir_port_create(handle, &cfg,  dlb2->poll_mode);\n+\tif (ret < 0) {\n+\t\tDLB2_LOG_ERR(\"dlb2: dlb2_dir_port_create error, ret=%d (driver status: %s)\\n\",\n+\t\t\t     ret, dlb2_error_strings[cfg.response.status]);\n+\t\tgoto error_exit;\n+\t}\n+\n+\tqm_port_id = cfg.response.id;\n+\n+\tDLB2_LOG_DBG(\"dlb2: ev_port %d uses qm DIR port %d <<<<<\\n\",\n+\t\t     ev_port->id, qm_port_id);\n+\n+\tqm_port = &ev_port->qm_port;\n+\tqm_port->ev_port = ev_port; /* back ptr */\n+\tqm_port->dlb2 = dlb2;  /* back ptr */\n+\n+\t/*\n+\t * Init local qe struct(s).\n+\t * Note: MOVDIR64 requires the enqueue QE to be aligned\n+\t */\n+\n+\tsnprintf(mz_name, sizeof(mz_name), \"%s_dir_port%d\",\n+\t\t handle->device_name,\n+\t\t ev_port->id);\n+\n+\tret = dlb2_init_qe_mem(qm_port, mz_name);\n+\n+\tif (ret < 0) {\n+\t\tDLB2_LOG_ERR(\"dlb2: init_qe_mem failed, ret=%d\\n\", ret);\n+\t\tgoto error_exit;\n+\t}\n+\n+\tqm_port->id = qm_port_id;\n+\n+\tqm_port->cached_ldb_credits = 0;\n+\tqm_port->cached_dir_credits = 0;\n+\t/* CQs with depth < 8 use an 8-entry queue, but withhold credits so\n+\t * the effective depth is smaller.\n+\t */\n+\tqm_port->cq_depth = cfg.cq_depth <= 8 ? 8 : cfg.cq_depth;\n+\tqm_port->cq_idx = 0;\n+\tqm_port->cq_idx_unmasked = 0;\n+\n+\tif (dlb2->poll_mode == DLB2_CQ_POLL_MODE_SPARSE)\n+\t\tqm_port->cq_depth_mask = (cfg.cq_depth * 4) - 1;\n+\telse\n+\t\tqm_port->cq_depth_mask = cfg.cq_depth - 1;\n+\n+\tqm_port->gen_bit_shift = __builtin_popcount(qm_port->cq_depth_mask);\n+\t/* starting value of gen bit - it toggles at wrap time */\n+\tqm_port->gen_bit = 1;\n+\n+\tqm_port->int_armed = false;\n+\n+\t/* Save off for later use in info and lookup APIs. */\n+\tqm_port->qid_mappings = &dlb2->qm_dir_to_ev_queue_id[0];\n+\n+\tqm_port->dequeue_depth = dequeue_depth;\n+\n+\t/* Directed ports are auto-pop, by default. */\n+\tqm_port->token_pop_mode = AUTO_POP;\n+\tqm_port->owed_tokens = 0;\n+\tqm_port->issued_releases = 0;\n+\n+\t/* Save config message too. 
*/\n+\trte_memcpy(&qm_port->cfg.dir, &cfg, sizeof(cfg));\n+\n+\t/* update state */\n+\tqm_port->state = PORT_STARTED; /* enabled at create time */\n+\tqm_port->config_state = DLB2_CONFIGURED;\n+\n+\tqm_port->dir_credits = dir_credit_high_watermark;\n+\tqm_port->ldb_credits = ldb_credit_high_watermark;\n+\tqm_port->credit_pool[DLB2_DIR_QUEUE] = &dlb2->dir_credit_pool;\n+\tqm_port->credit_pool[DLB2_LDB_QUEUE] = &dlb2->ldb_credit_pool;\n+\n+\tDLB2_LOG_DBG(\"dlb2: created dir port %d, depth = %d cr=%d,%d\\n\",\n+\t\t     qm_port_id,\n+\t\t     dequeue_depth,\n+\t\t     dir_credit_high_watermark,\n+\t\t     ldb_credit_high_watermark);\n+\n+\trte_spinlock_unlock(&handle->resource_lock);\n+\n+\treturn 0;\n+\n+error_exit:\n+\n+\tif (qm_port)\n+\t\tdlb2_free_qe_mem(qm_port);\n+\n+\trte_spinlock_unlock(&handle->resource_lock);\n+\n+\tDLB2_LOG_ERR(\"dlb2: create dir port failed!\\n\");\n+\n+\treturn ret;\n+}\n+\n+static int\n+dlb2_eventdev_port_setup(struct rte_eventdev *dev,\n+\t\t\t uint8_t ev_port_id,\n+\t\t\t const struct rte_event_port_conf *port_conf)\n+{\n+\tstruct dlb2_eventdev *dlb2;\n+\tstruct dlb2_eventdev_port *ev_port;\n+\tint ret;\n+\n+\tif (dev == NULL || port_conf == NULL) {\n+\t\tDLB2_LOG_ERR(\"Null parameter\\n\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tdlb2 = dlb2_pmd_priv(dev);\n+\n+\tif (ev_port_id >= DLB2_MAX_NUM_PORTS)\n+\t\treturn -EINVAL;\n+\n+\tif (port_conf->dequeue_depth >\n+\t\tevdev_dlb2_default_info.max_event_port_dequeue_depth ||\n+\t    port_conf->enqueue_depth >\n+\t\tevdev_dlb2_default_info.max_event_port_enqueue_depth)\n+\t\treturn -EINVAL;\n+\n+\tev_port = &dlb2->ev_ports[ev_port_id];\n+\t/* configured? */\n+\tif (ev_port->setup_done) {\n+\t\tDLB2_LOG_ERR(\"evport %d is already configured\\n\", ev_port_id);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* The reserved token interrupt arming scheme requires that one or more\n+\t * CQ tokens be reserved by the PMD. This limits the amount of CQ space\n+\t * usable by the DLB, so in order to give an *effective* CQ depth equal\n+\t * to the user-requested value, we double CQ depth and reserve half of\n+\t * its tokens. 
If the user requests the max CQ depth (256) then we\n+\t * cannot double it, so we reserve one token and give an effective\n+\t * depth of 255 entries.\n+\t */\n+\n+\tev_port->qm_port.is_directed = port_conf->event_port_cfg &\n+\t\tRTE_EVENT_PORT_CFG_SINGLE_LINK;\n+\n+\tif (!ev_port->qm_port.is_directed) {\n+\t\tret = dlb2_hw_create_ldb_port(dlb2,\n+\t\t\t\t\t      ev_port,\n+\t\t\t\t\t      port_conf->dequeue_depth,\n+\t\t\t\t\t      port_conf->enqueue_depth);\n+\t\tif (ret < 0) {\n+\t\t\tDLB2_LOG_ERR(\"Failed to create the lB port ve portId=%d\\n\",\n+\t\t\t\t     ev_port_id);\n+\n+\t\t\treturn ret;\n+\t\t}\n+\t} else {\n+\t\tret = dlb2_hw_create_dir_port(dlb2,\n+\t\t\t\t\t      ev_port,\n+\t\t\t\t\t      port_conf->dequeue_depth,\n+\t\t\t\t\t      port_conf->enqueue_depth);\n+\t\tif (ret < 0) {\n+\t\t\tDLB2_LOG_ERR(\"Failed to create the DIR port\\n\");\n+\t\t\treturn ret;\n+\t\t}\n+\t}\n+\n+\t/* Save off port config for reconfig */\n+\tdlb2->ev_ports[ev_port_id].conf = *port_conf;\n+\n+\tdlb2->ev_ports[ev_port_id].id = ev_port_id;\n+\tdlb2->ev_ports[ev_port_id].enq_configured = true;\n+\tdlb2->ev_ports[ev_port_id].setup_done = true;\n+\tdlb2->ev_ports[ev_port_id].inflight_max =\n+\t\tport_conf->new_event_threshold;\n+\tdlb2->ev_ports[ev_port_id].implicit_release =\n+\t\t!(port_conf->event_port_cfg &\n+\t\t  RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL);\n+\tdlb2->ev_ports[ev_port_id].outstanding_releases = 0;\n+\tdlb2->ev_ports[ev_port_id].inflight_credits = 0;\n+\tdlb2->ev_ports[ev_port_id].credit_update_quanta =\n+\t\tRTE_LIBRTE_PMD_DLB2_SW_CREDIT_QUANTA;\n+\tdlb2->ev_ports[ev_port_id].dlb2 = dlb2; /* reverse link */\n+\n+\t/* Tear down pre-existing port->queue links */\n+\tif (dlb2->run_state == DLB2_RUN_STATE_STOPPED)\n+\t\tdlb2_port_link_teardown(dlb2, &dlb2->ev_ports[ev_port_id]);\n+\n+\tdev->data->ports[ev_port_id] = &dlb2->ev_ports[ev_port_id];\n+\n+\treturn 0;\n+}\n+\n static void\n dlb2_entry_points_init(struct rte_eventdev *dev)\n {\n@@ -1053,6 +1579,7 @@ dlb2_entry_points_init(struct rte_eventdev *dev)\n \t\t.queue_def_conf   = dlb2_eventdev_queue_default_conf_get,\n \t\t.queue_setup      = dlb2_eventdev_queue_setup,\n \t\t.port_def_conf    = dlb2_eventdev_port_default_conf_get,\n+\t\t.port_setup       = dlb2_eventdev_port_setup,\n \t\t.dump             = dlb2_eventdev_dump,\n \t\t.xstats_get       = dlb2_eventdev_xstats_get,\n \t\t.xstats_get_names = dlb2_eventdev_xstats_get_names,\ndiff --git a/drivers/event/dlb2/dlb2_iface.c b/drivers/event/dlb2/dlb2_iface.c\nindex f50a918..84cbf25 100644\n--- a/drivers/event/dlb2/dlb2_iface.c\n+++ b/drivers/event/dlb2/dlb2_iface.c\n@@ -57,3 +57,12 @@ int (*dlb2_iface_set_sn_allocation)(struct dlb2_hw_dev *handle,\n \n int (*dlb2_iface_get_sn_occupancy)(struct dlb2_hw_dev *handle,\n \t\t\t\t   struct dlb2_get_sn_occupancy_args *args);\n+\n+int (*dlb2_iface_ldb_port_create)(struct dlb2_hw_dev *handle,\n+\t\t\t\t  struct dlb2_create_ldb_port_args *cfg,\n+\t\t\t\t  enum dlb2_cq_poll_modes poll_mode);\n+\n+int (*dlb2_iface_dir_port_create)(struct dlb2_hw_dev *handle,\n+\t\t\t\t  struct dlb2_create_dir_port_args *cfg,\n+\t\t\t\t  enum dlb2_cq_poll_modes poll_mode);\n+\ndiff --git a/drivers/event/dlb2/dlb2_iface.h b/drivers/event/dlb2/dlb2_iface.h\nindex c1ef7c2..a090a54 100644\n--- a/drivers/event/dlb2/dlb2_iface.h\n+++ b/drivers/event/dlb2/dlb2_iface.h\n@@ -43,4 +43,12 @@ extern int (*dlb2_iface_set_sn_allocation)(struct dlb2_hw_dev *handle,\n extern int (*dlb2_iface_get_sn_occupancy)(struct dlb2_hw_dev *handle,\n \t\t\t\t  struct 
dlb2_get_sn_occupancy_args *args);\n \n+extern int (*dlb2_iface_ldb_port_create)(struct dlb2_hw_dev *handle,\n+\t\t\t\t\t struct dlb2_create_ldb_port_args *cfg,\n+\t\t\t\t\t enum dlb2_cq_poll_modes poll_mode);\n+\n+extern int (*dlb2_iface_dir_port_create)(struct dlb2_hw_dev *handle,\n+\t\t\t\t\t struct dlb2_create_dir_port_args *cfg,\n+\t\t\t\t\t enum dlb2_cq_poll_modes poll_mode);\n+\n #endif /* _DLB2_IFACE_H_ */\ndiff --git a/drivers/event/dlb2/pf/base/dlb2_resource.c b/drivers/event/dlb2/pf/base/dlb2_resource.c\nindex dc9d19a..bb8683c 100644\n--- a/drivers/event/dlb2/pf/base/dlb2_resource.c\n+++ b/drivers/event/dlb2/pf/base/dlb2_resource.c\n@@ -3970,3 +3970,924 @@ int dlb2_set_group_sequence_numbers(struct dlb2_hw *hw,\n \n \treturn 0;\n }\n+\n+static void dlb2_ldb_port_configure_pp(struct dlb2_hw *hw,\n+\t\t\t\t       struct dlb2_hw_domain *domain,\n+\t\t\t\t       struct dlb2_ldb_port *port,\n+\t\t\t\t       bool vdev_req,\n+\t\t\t\t       unsigned int vdev_id)\n+{\n+\tunion dlb2_sys_ldb_pp2vas r0 = { {0} };\n+\tunion dlb2_sys_ldb_pp_v r4 = { {0} };\n+\n+\tr0.field.vas = domain->id.phys_id;\n+\n+\tDLB2_CSR_WR(hw, DLB2_SYS_LDB_PP2VAS(port->id.phys_id), r0.val);\n+\n+\tif (vdev_req) {\n+\t\tunion dlb2_sys_vf_ldb_vpp2pp r1 = { {0} };\n+\t\tunion dlb2_sys_ldb_pp2vdev r2 = { {0} };\n+\t\tunion dlb2_sys_vf_ldb_vpp_v r3 = { {0} };\n+\t\tunsigned int offs;\n+\t\tu32 virt_id;\n+\n+\t\t/*\n+\t\t * DLB uses producer port address bits 17:12 to determine the\n+\t\t * producer port ID. In Scalable IOV mode, PP accesses come\n+\t\t * through the PF MMIO window for the physical producer port,\n+\t\t * so for translation purposes the virtual and physical port\n+\t\t * IDs are equal.\n+\t\t */\n+\t\tif (hw->virt_mode == DLB2_VIRT_SRIOV)\n+\t\t\tvirt_id = port->id.virt_id;\n+\t\telse\n+\t\t\tvirt_id = port->id.phys_id;\n+\n+\t\tr1.field.pp = port->id.phys_id;\n+\n+\t\toffs = vdev_id * DLB2_MAX_NUM_LDB_PORTS + virt_id;\n+\n+\t\tDLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VPP2PP(offs), r1.val);\n+\n+\t\tr2.field.vdev = vdev_id;\n+\n+\t\tDLB2_CSR_WR(hw,\n+\t\t\t    DLB2_SYS_LDB_PP2VDEV(port->id.phys_id),\n+\t\t\t    r2.val);\n+\n+\t\tr3.field.vpp_v = 1;\n+\n+\t\tDLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VPP_V(offs), r3.val);\n+\t}\n+\n+\tr4.field.pp_v = 1;\n+\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_SYS_LDB_PP_V(port->id.phys_id),\n+\t\t    r4.val);\n+}\n+\n+static int dlb2_ldb_port_configure_cq(struct dlb2_hw *hw,\n+\t\t\t\t      struct dlb2_hw_domain *domain,\n+\t\t\t\t      struct dlb2_ldb_port *port,\n+\t\t\t\t      uintptr_t cq_dma_base,\n+\t\t\t\t      struct dlb2_create_ldb_port_args *args,\n+\t\t\t\t      bool vdev_req,\n+\t\t\t\t      unsigned int vdev_id)\n+{\n+\tunion dlb2_sys_ldb_cq_addr_l r0 = { {0} };\n+\tunion dlb2_sys_ldb_cq_addr_u r1 = { {0} };\n+\tunion dlb2_sys_ldb_cq2vf_pf_ro r2 = { {0} };\n+\tunion dlb2_chp_ldb_cq_tkn_depth_sel r3 = { {0} };\n+\tunion dlb2_lsp_cq_ldb_tkn_depth_sel r4 = { {0} };\n+\tunion dlb2_chp_hist_list_lim r5 = { {0} };\n+\tunion dlb2_chp_hist_list_base r6 = { {0} };\n+\tunion dlb2_lsp_cq_ldb_infl_lim r7 = { {0} };\n+\tunion dlb2_chp_hist_list_push_ptr r8 = { {0} };\n+\tunion dlb2_chp_hist_list_pop_ptr r9 = { {0} };\n+\tunion dlb2_sys_ldb_cq_at r10 = { {0} };\n+\tunion dlb2_sys_ldb_cq_pasid r11 = { {0} };\n+\tunion dlb2_chp_ldb_cq2vas r12 = { {0} };\n+\tunion dlb2_lsp_cq2priov r13 = { {0} };\n+\n+\t/* The CQ address is 64B-aligned, and the DLB only wants bits [63:6] */\n+\tr0.field.addr_l = cq_dma_base >> 6;\n+\n+\tDLB2_CSR_WR(hw, DLB2_SYS_LDB_CQ_ADDR_L(port->id.phys_id), 
r0.val);\n+\n+\tr1.field.addr_u = cq_dma_base >> 32;\n+\n+\tDLB2_CSR_WR(hw, DLB2_SYS_LDB_CQ_ADDR_U(port->id.phys_id), r1.val);\n+\n+\t/*\n+\t * 'ro' == relaxed ordering. This setting allows DLB2 to write\n+\t * cache lines out-of-order (but QEs within a cache line are always\n+\t * updated in-order).\n+\t */\n+\tr2.field.vf = vdev_id;\n+\tr2.field.is_pf = !vdev_req && (hw->virt_mode != DLB2_VIRT_SIOV);\n+\tr2.field.ro = 1;\n+\n+\tDLB2_CSR_WR(hw, DLB2_SYS_LDB_CQ2VF_PF_RO(port->id.phys_id), r2.val);\n+\n+\tif (args->cq_depth <= 8) {\n+\t\tr3.field.token_depth_select = 1;\n+\t} else if (args->cq_depth == 16) {\n+\t\tr3.field.token_depth_select = 2;\n+\t} else if (args->cq_depth == 32) {\n+\t\tr3.field.token_depth_select = 3;\n+\t} else if (args->cq_depth == 64) {\n+\t\tr3.field.token_depth_select = 4;\n+\t} else if (args->cq_depth == 128) {\n+\t\tr3.field.token_depth_select = 5;\n+\t} else if (args->cq_depth == 256) {\n+\t\tr3.field.token_depth_select = 6;\n+\t} else if (args->cq_depth == 512) {\n+\t\tr3.field.token_depth_select = 7;\n+\t} else if (args->cq_depth == 1024) {\n+\t\tr3.field.token_depth_select = 8;\n+\t} else {\n+\t\tDLB2_HW_ERR(hw,\n+\t\t\t    \"[%s():%d] Internal error: invalid CQ depth\\n\",\n+\t\t\t    __func__, __LINE__);\n+\t\treturn -EFAULT;\n+\t}\n+\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_CHP_LDB_CQ_TKN_DEPTH_SEL(port->id.phys_id),\n+\t\t    r3.val);\n+\n+\t/*\n+\t * To support CQs with depth less than 8, program the token count\n+\t * register with a non-zero initial value. Operations such as domain\n+\t * reset must take this initial value into account when quiescing the\n+\t * CQ.\n+\t */\n+\tport->init_tkn_cnt = 0;\n+\n+\tif (args->cq_depth < 8) {\n+\t\tunion dlb2_lsp_cq_ldb_tkn_cnt r14 = { {0} };\n+\n+\t\tport->init_tkn_cnt = 8 - args->cq_depth;\n+\n+\t\tr14.field.token_count = port->init_tkn_cnt;\n+\n+\t\tDLB2_CSR_WR(hw,\n+\t\t\t    DLB2_LSP_CQ_LDB_TKN_CNT(port->id.phys_id),\n+\t\t\t    r14.val);\n+\t} else {\n+\t\tDLB2_CSR_WR(hw,\n+\t\t\t    DLB2_LSP_CQ_LDB_TKN_CNT(port->id.phys_id),\n+\t\t\t    DLB2_LSP_CQ_LDB_TKN_CNT_RST);\n+\t}\n+\n+\tr4.field.token_depth_select = r3.field.token_depth_select;\n+\tr4.field.ignore_depth = 0;\n+\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_LSP_CQ_LDB_TKN_DEPTH_SEL(port->id.phys_id),\n+\t\t    r4.val);\n+\n+\t/* Reset the CQ write pointer */\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_CHP_LDB_CQ_WPTR(port->id.phys_id),\n+\t\t    DLB2_CHP_LDB_CQ_WPTR_RST);\n+\n+\tr5.field.limit = port->hist_list_entry_limit - 1;\n+\n+\tDLB2_CSR_WR(hw, DLB2_CHP_HIST_LIST_LIM(port->id.phys_id), r5.val);\n+\n+\tr6.field.base = port->hist_list_entry_base;\n+\n+\tDLB2_CSR_WR(hw, DLB2_CHP_HIST_LIST_BASE(port->id.phys_id), r6.val);\n+\n+\t/*\n+\t * The inflight limit sets a cap on the number of QEs for which this CQ\n+\t * can owe completions at one time.\n+\t */\n+\tr7.field.limit = args->cq_history_list_size;\n+\n+\tDLB2_CSR_WR(hw, DLB2_LSP_CQ_LDB_INFL_LIM(port->id.phys_id), r7.val);\n+\n+\tr8.field.push_ptr = r6.field.base;\n+\tr8.field.generation = 0;\n+\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_CHP_HIST_LIST_PUSH_PTR(port->id.phys_id),\n+\t\t    r8.val);\n+\n+\tr9.field.pop_ptr = r6.field.base;\n+\tr9.field.generation = 0;\n+\n+\tDLB2_CSR_WR(hw, DLB2_CHP_HIST_LIST_POP_PTR(port->id.phys_id), r9.val);\n+\n+\t/*\n+\t * Address translation (AT) settings: 0: untranslated, 2: translated\n+\t * (see ATS spec regarding Address Type field for more details)\n+\t */\n+\tr10.field.cq_at = 0;\n+\n+\tDLB2_CSR_WR(hw, DLB2_SYS_LDB_CQ_AT(port->id.phys_id), r10.val);\n+\n+\tif (vdev_req && 
hw->virt_mode == DLB2_VIRT_SIOV) {\n+\t\tr11.field.pasid = hw->pasid[vdev_id];\n+\t\tr11.field.fmt2 = 1;\n+\t}\n+\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_SYS_LDB_CQ_PASID(port->id.phys_id),\n+\t\t    r11.val);\n+\n+\tr12.field.cq2vas = domain->id.phys_id;\n+\n+\tDLB2_CSR_WR(hw, DLB2_CHP_LDB_CQ2VAS(port->id.phys_id), r12.val);\n+\n+\t/* Disable the port's QID mappings */\n+\tr13.field.v = 0;\n+\n+\tDLB2_CSR_WR(hw, DLB2_LSP_CQ2PRIOV(port->id.phys_id), r13.val);\n+\n+\treturn 0;\n+}\n+\n+static int dlb2_configure_ldb_port(struct dlb2_hw *hw,\n+\t\t\t\t   struct dlb2_hw_domain *domain,\n+\t\t\t\t   struct dlb2_ldb_port *port,\n+\t\t\t\t   uintptr_t cq_dma_base,\n+\t\t\t\t   struct dlb2_create_ldb_port_args *args,\n+\t\t\t\t   bool vdev_req,\n+\t\t\t\t   unsigned int vdev_id)\n+{\n+\tint ret, i;\n+\n+\tport->hist_list_entry_base = domain->hist_list_entry_base +\n+\t\t\t\t     domain->hist_list_entry_offset;\n+\tport->hist_list_entry_limit = port->hist_list_entry_base +\n+\t\t\t\t      args->cq_history_list_size;\n+\n+\tdomain->hist_list_entry_offset += args->cq_history_list_size;\n+\tdomain->avail_hist_list_entries -= args->cq_history_list_size;\n+\n+\tret = dlb2_ldb_port_configure_cq(hw,\n+\t\t\t\t\t domain,\n+\t\t\t\t\t port,\n+\t\t\t\t\t cq_dma_base,\n+\t\t\t\t\t args,\n+\t\t\t\t\t vdev_req,\n+\t\t\t\t\t vdev_id);\n+\tif (ret < 0)\n+\t\treturn ret;\n+\n+\tdlb2_ldb_port_configure_pp(hw,\n+\t\t\t\t   domain,\n+\t\t\t\t   port,\n+\t\t\t\t   vdev_req,\n+\t\t\t\t   vdev_id);\n+\n+\tdlb2_ldb_port_cq_enable(hw, port);\n+\n+\tfor (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++)\n+\t\tport->qid_map[i].state = DLB2_QUEUE_UNMAPPED;\n+\tport->num_mappings = 0;\n+\n+\tport->enabled = true;\n+\n+\tport->configured = true;\n+\n+\treturn 0;\n+}\n+\n+static void\n+dlb2_log_create_ldb_port_args(struct dlb2_hw *hw,\n+\t\t\t      u32 domain_id,\n+\t\t\t      uintptr_t cq_dma_base,\n+\t\t\t      struct dlb2_create_ldb_port_args *args,\n+\t\t\t      bool vdev_req,\n+\t\t\t      unsigned int vdev_id)\n+{\n+\tDLB2_HW_DBG(hw, \"DLB2 create load-balanced port arguments:\\n\");\n+\tif (vdev_req)\n+\t\tDLB2_HW_DBG(hw, \"(Request from vdev %d)\\n\", vdev_id);\n+\tDLB2_HW_DBG(hw, \"\\tDomain ID:                 %d\\n\",\n+\t\t    domain_id);\n+\tDLB2_HW_DBG(hw, \"\\tCQ depth:                  %d\\n\",\n+\t\t    args->cq_depth);\n+\tDLB2_HW_DBG(hw, \"\\tCQ hist list size:         %d\\n\",\n+\t\t    args->cq_history_list_size);\n+\tDLB2_HW_DBG(hw, \"\\tCQ base address:           0x%lx\\n\",\n+\t\t    cq_dma_base);\n+\tDLB2_HW_DBG(hw, \"\\tCoS ID:                    %u\\n\", args->cos_id);\n+\tDLB2_HW_DBG(hw, \"\\tStrict CoS allocation:     %u\\n\",\n+\t\t    args->cos_strict);\n+}\n+\n+static int\n+dlb2_verify_create_ldb_port_args(struct dlb2_hw *hw,\n+\t\t\t\t u32 domain_id,\n+\t\t\t\t uintptr_t cq_dma_base,\n+\t\t\t\t struct dlb2_create_ldb_port_args *args,\n+\t\t\t\t struct dlb2_cmd_response *resp,\n+\t\t\t\t bool vdev_req,\n+\t\t\t\t unsigned int vdev_id)\n+{\n+\tstruct dlb2_hw_domain *domain;\n+\tint i;\n+\n+\tdomain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);\n+\n+\tif (!domain) {\n+\t\tresp->status = DLB2_ST_INVALID_DOMAIN_ID;\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif (!domain->configured) {\n+\t\tresp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif (domain->started) {\n+\t\tresp->status = DLB2_ST_DOMAIN_STARTED;\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif (args->cos_id >= DLB2_NUM_COS_DOMAINS) {\n+\t\tresp->status = DLB2_ST_INVALID_COS_ID;\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif 
(args->cos_strict) {\n+\t\tif (dlb2_list_empty(&domain->avail_ldb_ports[args->cos_id])) {\n+\t\t\tresp->status = DLB2_ST_LDB_PORTS_UNAVAILABLE;\n+\t\t\treturn -EINVAL;\n+\t\t}\n+\t} else {\n+\t\tfor (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {\n+\t\t\tif (!dlb2_list_empty(&domain->avail_ldb_ports[i]))\n+\t\t\t\tbreak;\n+\t\t}\n+\n+\t\tif (i == DLB2_NUM_COS_DOMAINS) {\n+\t\t\tresp->status = DLB2_ST_LDB_PORTS_UNAVAILABLE;\n+\t\t\treturn -EINVAL;\n+\t\t}\n+\t}\n+\n+\t/* Check cache-line alignment */\n+\tif ((cq_dma_base & 0x3F) != 0) {\n+\t\tresp->status = DLB2_ST_INVALID_CQ_VIRT_ADDR;\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif (args->cq_depth != 1 &&\n+\t    args->cq_depth != 2 &&\n+\t    args->cq_depth != 4 &&\n+\t    args->cq_depth != 8 &&\n+\t    args->cq_depth != 16 &&\n+\t    args->cq_depth != 32 &&\n+\t    args->cq_depth != 64 &&\n+\t    args->cq_depth != 128 &&\n+\t    args->cq_depth != 256 &&\n+\t    args->cq_depth != 512 &&\n+\t    args->cq_depth != 1024) {\n+\t\tresp->status = DLB2_ST_INVALID_CQ_DEPTH;\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* The history list size must be >= 1 */\n+\tif (!args->cq_history_list_size) {\n+\t\tresp->status = DLB2_ST_INVALID_HIST_LIST_DEPTH;\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif (args->cq_history_list_size > domain->avail_hist_list_entries) {\n+\t\tresp->status = DLB2_ST_HIST_LIST_ENTRIES_UNAVAILABLE;\n+\t\treturn -EINVAL;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+\n+/**\n+ * dlb2_hw_create_ldb_port() - Allocate and initialize a load-balanced port and\n+ *\tits resources.\n+ * @hw:\tContains the current state of the DLB2 hardware.\n+ * @domain_id: Domain ID\n+ * @args: User-provided arguments.\n+ * @cq_dma_base: Base DMA address for consumer queue memory\n+ * @resp: Response to user.\n+ * @vdev_req: Request came from a virtual device.\n+ * @vdev_id: If vdev_req is true, this contains the virtual device's ID.\n+ *\n+ * Return: returns < 0 on error, 0 otherwise. If the driver is unable to\n+ * satisfy a request, resp->status will be set accordingly.\n+ */\n+int dlb2_hw_create_ldb_port(struct dlb2_hw *hw,\n+\t\t\t    u32 domain_id,\n+\t\t\t    struct dlb2_create_ldb_port_args *args,\n+\t\t\t    uintptr_t cq_dma_base,\n+\t\t\t    struct dlb2_cmd_response *resp,\n+\t\t\t    bool vdev_req,\n+\t\t\t    unsigned int vdev_id)\n+{\n+\tstruct dlb2_hw_domain *domain;\n+\tstruct dlb2_ldb_port *port;\n+\tint ret, cos_id, i;\n+\n+\tdlb2_log_create_ldb_port_args(hw,\n+\t\t\t\t      domain_id,\n+\t\t\t\t      cq_dma_base,\n+\t\t\t\t      args,\n+\t\t\t\t      vdev_req,\n+\t\t\t\t      vdev_id);\n+\n+\t/*\n+\t * Verify that hardware resources are available before attempting to\n+\t * satisfy the request. 
This simplifies the error unwinding code.\n+\t */\n+\tret = dlb2_verify_create_ldb_port_args(hw,\n+\t\t\t\t\t       domain_id,\n+\t\t\t\t\t       cq_dma_base,\n+\t\t\t\t\t       args,\n+\t\t\t\t\t       resp,\n+\t\t\t\t\t       vdev_req,\n+\t\t\t\t\t       vdev_id);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\tdomain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);\n+\tif (!domain) {\n+\t\tDLB2_HW_ERR(hw,\n+\t\t\t    \"[%s():%d] Internal error: domain not found\\n\",\n+\t\t\t    __func__, __LINE__);\n+\t\treturn -EFAULT;\n+\t}\n+\n+\tif (args->cos_strict) {\n+\t\tcos_id = args->cos_id;\n+\n+\t\tport = DLB2_DOM_LIST_HEAD(domain->avail_ldb_ports[cos_id],\n+\t\t\t\t\t  typeof(*port));\n+\t} else {\n+\t\tint idx;\n+\n+\t\tfor (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {\n+\t\t\tidx = (args->cos_id + i) % DLB2_NUM_COS_DOMAINS;\n+\n+\t\t\tport = DLB2_DOM_LIST_HEAD(domain->avail_ldb_ports[idx],\n+\t\t\t\t\t\t  typeof(*port));\n+\t\t\tif (port)\n+\t\t\t\tbreak;\n+\t\t}\n+\n+\t\tcos_id = idx;\n+\t}\n+\n+\tif (!port) {\n+\t\tDLB2_HW_ERR(hw,\n+\t\t\t    \"[%s():%d] Internal error: no available ldb ports\\n\",\n+\t\t\t    __func__, __LINE__);\n+\t\treturn -EFAULT;\n+\t}\n+\n+\tif (port->configured) {\n+\t\tDLB2_HW_ERR(hw,\n+\t\t\t    \"[%s()] Internal error: avail_ldb_ports contains configured ports.\\n\",\n+\t\t\t    __func__);\n+\t\treturn -EFAULT;\n+\t}\n+\n+\tret = dlb2_configure_ldb_port(hw,\n+\t\t\t\t      domain,\n+\t\t\t\t      port,\n+\t\t\t\t      cq_dma_base,\n+\t\t\t\t      args,\n+\t\t\t\t      vdev_req,\n+\t\t\t\t      vdev_id);\n+\tif (ret < 0)\n+\t\treturn ret;\n+\n+\t/*\n+\t * Configuration succeeded, so move the resource from the 'avail' to\n+\t * the 'used' list.\n+\t */\n+\tdlb2_list_del(&domain->avail_ldb_ports[cos_id], &port->domain_list);\n+\n+\tdlb2_list_add(&domain->used_ldb_ports[cos_id], &port->domain_list);\n+\n+\tresp->status = 0;\n+\tresp->id = (vdev_req) ? 
port->id.virt_id : port->id.phys_id;\n+\n+\treturn 0;\n+}\n+\n+static void\n+dlb2_log_create_dir_port_args(struct dlb2_hw *hw,\n+\t\t\t      u32 domain_id,\n+\t\t\t      uintptr_t cq_dma_base,\n+\t\t\t      struct dlb2_create_dir_port_args *args,\n+\t\t\t      bool vdev_req,\n+\t\t\t      unsigned int vdev_id)\n+{\n+\tDLB2_HW_DBG(hw, \"DLB2 create directed port arguments:\\n\");\n+\tif (vdev_req)\n+\t\tDLB2_HW_DBG(hw, \"(Request from vdev %d)\\n\", vdev_id);\n+\tDLB2_HW_DBG(hw, \"\\tDomain ID:                 %d\\n\",\n+\t\t    domain_id);\n+\tDLB2_HW_DBG(hw, \"\\tCQ depth:                  %d\\n\",\n+\t\t    args->cq_depth);\n+\tDLB2_HW_DBG(hw, \"\\tCQ base address:           0x%lx\\n\",\n+\t\t    cq_dma_base);\n+}\n+\n+static struct dlb2_dir_pq_pair *\n+dlb2_get_domain_used_dir_pq(u32 id,\n+\t\t\t    bool vdev_req,\n+\t\t\t    struct dlb2_hw_domain *domain)\n+{\n+\tstruct dlb2_list_entry *iter;\n+\tstruct dlb2_dir_pq_pair *port;\n+\tRTE_SET_USED(iter);\n+\n+\tif (id >= DLB2_MAX_NUM_DIR_PORTS)\n+\t\treturn NULL;\n+\n+\tDLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter)\n+\t\tif ((!vdev_req && port->id.phys_id == id) ||\n+\t\t    (vdev_req && port->id.virt_id == id))\n+\t\t\treturn port;\n+\n+\treturn NULL;\n+}\n+\n+static int\n+dlb2_verify_create_dir_port_args(struct dlb2_hw *hw,\n+\t\t\t\t u32 domain_id,\n+\t\t\t\t uintptr_t cq_dma_base,\n+\t\t\t\t struct dlb2_create_dir_port_args *args,\n+\t\t\t\t struct dlb2_cmd_response *resp,\n+\t\t\t\t bool vdev_req,\n+\t\t\t\t unsigned int vdev_id)\n+{\n+\tstruct dlb2_hw_domain *domain;\n+\n+\tdomain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);\n+\n+\tif (!domain) {\n+\t\tresp->status = DLB2_ST_INVALID_DOMAIN_ID;\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif (!domain->configured) {\n+\t\tresp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif (domain->started) {\n+\t\tresp->status = DLB2_ST_DOMAIN_STARTED;\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/*\n+\t * If the user claims the queue is already configured, validate\n+\t * the queue ID, its domain, and whether the queue is configured.\n+\t */\n+\tif (args->queue_id != -1) {\n+\t\tstruct dlb2_dir_pq_pair *queue;\n+\n+\t\tqueue = dlb2_get_domain_used_dir_pq(args->queue_id,\n+\t\t\t\t\t\t    vdev_req,\n+\t\t\t\t\t\t    domain);\n+\n+\t\tif (!queue || queue->domain_id.phys_id != domain->id.phys_id ||\n+\t\t    !queue->queue_configured) {\n+\t\t\tresp->status = DLB2_ST_INVALID_DIR_QUEUE_ID;\n+\t\t\treturn -EINVAL;\n+\t\t}\n+\t}\n+\n+\t/*\n+\t * If the port's queue is not configured, validate that a free\n+\t * port-queue pair is available.\n+\t */\n+\tif (args->queue_id == -1 &&\n+\t    dlb2_list_empty(&domain->avail_dir_pq_pairs)) {\n+\t\tresp->status = DLB2_ST_DIR_PORTS_UNAVAILABLE;\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* Check cache-line alignment */\n+\tif ((cq_dma_base & 0x3F) != 0) {\n+\t\tresp->status = DLB2_ST_INVALID_CQ_VIRT_ADDR;\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif (args->cq_depth != 1 &&\n+\t    args->cq_depth != 2 &&\n+\t    args->cq_depth != 4 &&\n+\t    args->cq_depth != 8 &&\n+\t    args->cq_depth != 16 &&\n+\t    args->cq_depth != 32 &&\n+\t    args->cq_depth != 64 &&\n+\t    args->cq_depth != 128 &&\n+\t    args->cq_depth != 256 &&\n+\t    args->cq_depth != 512 &&\n+\t    args->cq_depth != 1024) {\n+\t\tresp->status = DLB2_ST_INVALID_CQ_DEPTH;\n+\t\treturn -EINVAL;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static void dlb2_dir_port_configure_pp(struct dlb2_hw *hw,\n+\t\t\t\t       struct dlb2_hw_domain *domain,\n+\t\t\t\t       struct dlb2_dir_pq_pair 
*port,\n+\t\t\t\t       bool vdev_req,\n+\t\t\t\t       unsigned int vdev_id)\n+{\n+\tunion dlb2_sys_dir_pp2vas r0 = { {0} };\n+\tunion dlb2_sys_dir_pp_v r4 = { {0} };\n+\n+\tr0.field.vas = domain->id.phys_id;\n+\n+\tDLB2_CSR_WR(hw, DLB2_SYS_DIR_PP2VAS(port->id.phys_id), r0.val);\n+\n+\tif (vdev_req) {\n+\t\tunion dlb2_sys_vf_dir_vpp2pp r1 = { {0} };\n+\t\tunion dlb2_sys_dir_pp2vdev r2 = { {0} };\n+\t\tunion dlb2_sys_vf_dir_vpp_v r3 = { {0} };\n+\t\tunsigned int offs;\n+\t\tu32 virt_id;\n+\n+\t\t/*\n+\t\t * DLB uses producer port address bits 17:12 to determine the\n+\t\t * producer port ID. In Scalable IOV mode, PP accesses come\n+\t\t * through the PF MMIO window for the physical producer port,\n+\t\t * so for translation purposes the virtual and physical port\n+\t\t * IDs are equal.\n+\t\t */\n+\t\tif (hw->virt_mode == DLB2_VIRT_SRIOV)\n+\t\t\tvirt_id = port->id.virt_id;\n+\t\telse\n+\t\t\tvirt_id = port->id.phys_id;\n+\n+\t\tr1.field.pp = port->id.phys_id;\n+\n+\t\toffs = vdev_id * DLB2_MAX_NUM_DIR_PORTS + virt_id;\n+\n+\t\tDLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VPP2PP(offs), r1.val);\n+\n+\t\tr2.field.vdev = vdev_id;\n+\n+\t\tDLB2_CSR_WR(hw,\n+\t\t\t    DLB2_SYS_DIR_PP2VDEV(port->id.phys_id),\n+\t\t\t    r2.val);\n+\n+\t\tr3.field.vpp_v = 1;\n+\n+\t\tDLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VPP_V(offs), r3.val);\n+\t}\n+\n+\tr4.field.pp_v = 1;\n+\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_SYS_DIR_PP_V(port->id.phys_id),\n+\t\t    r4.val);\n+}\n+\n+static int dlb2_dir_port_configure_cq(struct dlb2_hw *hw,\n+\t\t\t\t      struct dlb2_hw_domain *domain,\n+\t\t\t\t      struct dlb2_dir_pq_pair *port,\n+\t\t\t\t      uintptr_t cq_dma_base,\n+\t\t\t\t      struct dlb2_create_dir_port_args *args,\n+\t\t\t\t      bool vdev_req,\n+\t\t\t\t      unsigned int vdev_id)\n+{\n+\tunion dlb2_sys_dir_cq_addr_l r0 = { {0} };\n+\tunion dlb2_sys_dir_cq_addr_u r1 = { {0} };\n+\tunion dlb2_sys_dir_cq2vf_pf_ro r2 = { {0} };\n+\tunion dlb2_chp_dir_cq_tkn_depth_sel r3 = { {0} };\n+\tunion dlb2_lsp_cq_dir_tkn_depth_sel_dsi r4 = { {0} };\n+\tunion dlb2_sys_dir_cq_fmt r9 = { {0} };\n+\tunion dlb2_sys_dir_cq_at r10 = { {0} };\n+\tunion dlb2_sys_dir_cq_pasid r11 = { {0} };\n+\tunion dlb2_chp_dir_cq2vas r12 = { {0} };\n+\n+\t/* The CQ address is 64B-aligned, and the DLB only wants bits [63:6] */\n+\tr0.field.addr_l = cq_dma_base >> 6;\n+\n+\tDLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_ADDR_L(port->id.phys_id), r0.val);\n+\n+\tr1.field.addr_u = cq_dma_base >> 32;\n+\n+\tDLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_ADDR_U(port->id.phys_id), r1.val);\n+\n+\t/*\n+\t * 'ro' == relaxed ordering. 
This setting allows DLB2 to write\n+\t * cache lines out-of-order (but QEs within a cache line are always\n+\t * updated in-order).\n+\t */\n+\tr2.field.vf = vdev_id;\n+\tr2.field.is_pf = !vdev_req && (hw->virt_mode != DLB2_VIRT_SIOV);\n+\tr2.field.ro = 1;\n+\n+\tDLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ2VF_PF_RO(port->id.phys_id), r2.val);\n+\n+\tif (args->cq_depth <= 8) {\n+\t\tr3.field.token_depth_select = 1;\n+\t} else if (args->cq_depth == 16) {\n+\t\tr3.field.token_depth_select = 2;\n+\t} else if (args->cq_depth == 32) {\n+\t\tr3.field.token_depth_select = 3;\n+\t} else if (args->cq_depth == 64) {\n+\t\tr3.field.token_depth_select = 4;\n+\t} else if (args->cq_depth == 128) {\n+\t\tr3.field.token_depth_select = 5;\n+\t} else if (args->cq_depth == 256) {\n+\t\tr3.field.token_depth_select = 6;\n+\t} else if (args->cq_depth == 512) {\n+\t\tr3.field.token_depth_select = 7;\n+\t} else if (args->cq_depth == 1024) {\n+\t\tr3.field.token_depth_select = 8;\n+\t} else {\n+\t\tDLB2_HW_ERR(hw,\n+\t\t\t    \"[%s():%d] Internal error: invalid CQ depth\\n\",\n+\t\t\t    __func__, __LINE__);\n+\t\treturn -EFAULT;\n+\t}\n+\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_CHP_DIR_CQ_TKN_DEPTH_SEL(port->id.phys_id),\n+\t\t    r3.val);\n+\n+\t/*\n+\t * To support CQs with depth less than 8, program the token count\n+\t * register with a non-zero initial value. Operations such as domain\n+\t * reset must take this initial value into account when quiescing the\n+\t * CQ.\n+\t */\n+\tport->init_tkn_cnt = 0;\n+\n+\tif (args->cq_depth < 8) {\n+\t\tunion dlb2_lsp_cq_dir_tkn_cnt r13 = { {0} };\n+\n+\t\tport->init_tkn_cnt = 8 - args->cq_depth;\n+\n+\t\tr13.field.count = port->init_tkn_cnt;\n+\n+\t\tDLB2_CSR_WR(hw,\n+\t\t\t    DLB2_LSP_CQ_DIR_TKN_CNT(port->id.phys_id),\n+\t\t\t    r13.val);\n+\t} else {\n+\t\tDLB2_CSR_WR(hw,\n+\t\t\t    DLB2_LSP_CQ_DIR_TKN_CNT(port->id.phys_id),\n+\t\t\t    DLB2_LSP_CQ_DIR_TKN_CNT_RST);\n+\t}\n+\n+\tr4.field.token_depth_select = r3.field.token_depth_select;\n+\tr4.field.disable_wb_opt = 0;\n+\tr4.field.ignore_depth = 0;\n+\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI(port->id.phys_id),\n+\t\t    r4.val);\n+\n+\t/* Reset the CQ write pointer */\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_CHP_DIR_CQ_WPTR(port->id.phys_id),\n+\t\t    DLB2_CHP_DIR_CQ_WPTR_RST);\n+\n+\t/* Virtualize the PPID */\n+\tr9.field.keep_pf_ppid = 0;\n+\n+\tDLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_FMT(port->id.phys_id), r9.val);\n+\n+\t/*\n+\t * Address translation (AT) settings: 0: untranslated, 2: translated\n+\t * (see ATS spec regarding Address Type field for more details)\n+\t */\n+\tr10.field.cq_at = 0;\n+\n+\tDLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_AT(port->id.phys_id), r10.val);\n+\n+\tif (vdev_req && hw->virt_mode == DLB2_VIRT_SIOV) {\n+\t\tr11.field.pasid = hw->pasid[vdev_id];\n+\t\tr11.field.fmt2 = 1;\n+\t}\n+\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_SYS_DIR_CQ_PASID(port->id.phys_id),\n+\t\t    r11.val);\n+\n+\tr12.field.cq2vas = domain->id.phys_id;\n+\n+\tDLB2_CSR_WR(hw, DLB2_CHP_DIR_CQ2VAS(port->id.phys_id), r12.val);\n+\n+\treturn 0;\n+}\n+\n+static int dlb2_configure_dir_port(struct dlb2_hw *hw,\n+\t\t\t\t   struct dlb2_hw_domain *domain,\n+\t\t\t\t   struct dlb2_dir_pq_pair *port,\n+\t\t\t\t   uintptr_t cq_dma_base,\n+\t\t\t\t   struct dlb2_create_dir_port_args *args,\n+\t\t\t\t   bool vdev_req,\n+\t\t\t\t   unsigned int vdev_id)\n+{\n+\tint ret;\n+\n+\tret = dlb2_dir_port_configure_cq(hw,\n+\t\t\t\t\t domain,\n+\t\t\t\t\t port,\n+\t\t\t\t\t cq_dma_base,\n+\t\t\t\t\t args,\n+\t\t\t\t\t vdev_req,\n+\t\t\t\t\t vdev_id);\n+\n+\tif 
(ret < 0)\n+\t\treturn ret;\n+\n+\tdlb2_dir_port_configure_pp(hw,\n+\t\t\t\t   domain,\n+\t\t\t\t   port,\n+\t\t\t\t   vdev_req,\n+\t\t\t\t   vdev_id);\n+\n+\tdlb2_dir_port_cq_enable(hw, port);\n+\n+\tport->enabled = true;\n+\n+\tport->port_configured = true;\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * dlb2_hw_create_dir_port() - Allocate and initialize a DLB directed port\n+ *\tand queue. The port/queue pair have the same ID and name.\n+ * @hw:\tContains the current state of the DLB2 hardware.\n+ * @domain_id: Domain ID\n+ * @args: User-provided arguments.\n+ * @cq_dma_base: Base DMA address for consumer queue memory\n+ * @resp: Response to user.\n+ * @vdev_req: Request came from a virtual device.\n+ * @vdev_id: If vdev_req is true, this contains the virtual device's ID.\n+ *\n+ * Return: returns < 0 on error, 0 otherwise. If the driver is unable to\n+ * satisfy a request, resp->status will be set accordingly.\n+ */\n+int dlb2_hw_create_dir_port(struct dlb2_hw *hw,\n+\t\t\t    u32 domain_id,\n+\t\t\t    struct dlb2_create_dir_port_args *args,\n+\t\t\t    uintptr_t cq_dma_base,\n+\t\t\t    struct dlb2_cmd_response *resp,\n+\t\t\t    bool vdev_req,\n+\t\t\t    unsigned int vdev_id)\n+{\n+\tstruct dlb2_dir_pq_pair *port;\n+\tstruct dlb2_hw_domain *domain;\n+\tint ret;\n+\n+\tdlb2_log_create_dir_port_args(hw,\n+\t\t\t\t      domain_id,\n+\t\t\t\t      cq_dma_base,\n+\t\t\t\t      args,\n+\t\t\t\t      vdev_req,\n+\t\t\t\t      vdev_id);\n+\n+\t/*\n+\t * Verify that hardware resources are available before attempting to\n+\t * satisfy the request. This simplifies the error unwinding code.\n+\t */\n+\tret = dlb2_verify_create_dir_port_args(hw,\n+\t\t\t\t\t       domain_id,\n+\t\t\t\t\t       cq_dma_base,\n+\t\t\t\t\t       args,\n+\t\t\t\t\t       resp,\n+\t\t\t\t\t       vdev_req,\n+\t\t\t\t\t       vdev_id);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\tdomain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);\n+\n+\tif (args->queue_id != -1)\n+\t\tport = dlb2_get_domain_used_dir_pq(args->queue_id,\n+\t\t\t\t\t\t   vdev_req,\n+\t\t\t\t\t\t   domain);\n+\telse\n+\t\tport = DLB2_DOM_LIST_HEAD(domain->avail_dir_pq_pairs,\n+\t\t\t\t\t  typeof(*port));\n+\tif (!port) {\n+\t\tDLB2_HW_ERR(hw,\n+\t\t\t    \"[%s():%d] Internal error: no available dir ports\\n\",\n+\t\t\t    __func__, __LINE__);\n+\t\treturn -EFAULT;\n+\t}\n+\n+\tret = dlb2_configure_dir_port(hw,\n+\t\t\t\t      domain,\n+\t\t\t\t      port,\n+\t\t\t\t      cq_dma_base,\n+\t\t\t\t      args,\n+\t\t\t\t      vdev_req,\n+\t\t\t\t      vdev_id);\n+\tif (ret < 0)\n+\t\treturn ret;\n+\n+\t/*\n+\t * Configuration succeeded, so move the resource from the 'avail' to\n+\t * the 'used' list (if it's not already there).\n+\t */\n+\tif (args->queue_id == -1) {\n+\t\tdlb2_list_del(&domain->avail_dir_pq_pairs, &port->domain_list);\n+\n+\t\tdlb2_list_add(&domain->used_dir_pq_pairs, &port->domain_list);\n+\t}\n+\n+\tresp->status = 0;\n+\tresp->id = (vdev_req) ? 
port->id.virt_id : port->id.phys_id;\n+\n+\treturn 0;\n+}\ndiff --git a/drivers/event/dlb2/pf/dlb2_main.c b/drivers/event/dlb2/pf/dlb2_main.c\nindex b8e32ab..582ef53 100644\n--- a/drivers/event/dlb2/pf/dlb2_main.c\n+++ b/drivers/event/dlb2/pf/dlb2_main.c\n@@ -642,3 +642,31 @@ dlb2_pf_create_ldb_queue(struct dlb2_hw *hw,\n \treturn dlb2_hw_create_ldb_queue(hw, id, args, resp, NOT_VF_REQ,\n \t\t\t\t\tPF_ID_ZERO);\n }\n+\n+int\n+dlb2_pf_create_ldb_port(struct dlb2_hw *hw,\n+\t\t\tu32 id,\n+\t\t\tstruct dlb2_create_ldb_port_args *args,\n+\t\t\tuintptr_t cq_dma_base,\n+\t\t\tstruct dlb2_cmd_response *resp)\n+{\n+\treturn dlb2_hw_create_ldb_port(hw, id, args,\n+\t\t\t\t       cq_dma_base,\n+\t\t\t\t       resp,\n+\t\t\t\t       NOT_VF_REQ,\n+\t\t\t\t       PF_ID_ZERO);\n+}\n+\n+int\n+dlb2_pf_create_dir_port(struct dlb2_hw *hw,\n+\t\t\tu32 id,\n+\t\t\tstruct dlb2_create_dir_port_args *args,\n+\t\t\tuintptr_t cq_dma_base,\n+\t\t\tstruct dlb2_cmd_response *resp)\n+{\n+\treturn dlb2_hw_create_dir_port(hw, id, args,\n+\t\t\t\t       cq_dma_base,\n+\t\t\t\t       resp,\n+\t\t\t\t       NOT_VF_REQ,\n+\t\t\t\t       PF_ID_ZERO);\n+}\ndiff --git a/drivers/event/dlb2/pf/dlb2_pf.c b/drivers/event/dlb2/pf/dlb2_pf.c\nindex dea70e6..a6824b1 100644\n--- a/drivers/event/dlb2/pf/dlb2_pf.c\n+++ b/drivers/event/dlb2/pf/dlb2_pf.c\n@@ -234,6 +234,183 @@ dlb2_pf_set_sn_allocation(struct dlb2_hw_dev *handle,\n \treturn ret;\n }\n \n+static void *\n+dlb2_alloc_coherent_aligned(uintptr_t *phys, size_t size, int align)\n+{\n+\tconst struct rte_memzone *mz;\n+\tchar mz_name[RTE_MEMZONE_NAMESIZE];\n+\tuint32_t core_id = rte_lcore_id();\n+\tunsigned int socket_id;\n+\n+\tsnprintf(mz_name, sizeof(mz_name) - 1, \"%lx\",\n+\t\t (unsigned long)rte_get_timer_cycles());\n+\tif (core_id == (unsigned int)LCORE_ID_ANY)\n+\t\tcore_id = rte_get_master_lcore();\n+\tsocket_id = rte_lcore_to_socket_id(core_id);\n+\tmz = rte_memzone_reserve_aligned(mz_name, size, socket_id,\n+\t\t\t\t\t RTE_MEMZONE_IOVA_CONTIG, align);\n+\tif (!mz) {\n+\t\tDLB2_LOG_DBG(\"Unable to allocate DMA memory of size %zu bytes - %s\\n\",\n+\t\t\t     size, rte_strerror(rte_errno));\n+\t\t*phys = 0;\n+\t\treturn NULL;\n+\t}\n+\t*phys = mz->iova;\n+\treturn mz->addr;\n+}\n+\n+static int\n+dlb2_pf_ldb_port_create(struct dlb2_hw_dev *handle,\n+\t\t\tstruct dlb2_create_ldb_port_args *cfg,\n+\t\t\tenum dlb2_cq_poll_modes poll_mode)\n+{\n+\tstruct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;\n+\tstruct dlb2_cmd_response response = {0};\n+\tstruct dlb2_port_memory port_memory;\n+\tint ret, cq_alloc_depth;\n+\tuint8_t *port_base;\n+\tint alloc_sz, qe_sz;\n+\tphys_addr_t cq_base;\n+\tphys_addr_t pp_base;\n+\tint is_dir = false;\n+\n+\tDLB2_INFO(dev->dlb2_device, \"Entering %s()\\n\", __func__);\n+\n+\tif (poll_mode == DLB2_CQ_POLL_MODE_STD)\n+\t\tqe_sz = sizeof(struct dlb2_dequeue_qe);\n+\telse\n+\t\tqe_sz = RTE_CACHE_LINE_SIZE;\n+\n+\t/* Calculate the port memory required, and round up to the nearest\n+\t * cache line.\n+\t */\n+\tcq_alloc_depth = RTE_MAX(cfg->cq_depth, DLB2_MIN_HARDWARE_CQ_DEPTH);\n+\talloc_sz = cq_alloc_depth * qe_sz;\n+\talloc_sz = RTE_CACHE_LINE_ROUNDUP(alloc_sz);\n+\n+\tport_base = dlb2_alloc_coherent_aligned(&cq_base,\n+\t\t\t\t\t\talloc_sz,\n+\t\t\t\t\t\tPAGE_SIZE);\n+\tif (port_base == NULL)\n+\t\treturn -ENOMEM;\n+\n+\t/* Lock the page in memory */\n+\tret = rte_mem_lock_page(port_base);\n+\tif (ret < 0) {\n+\t\tDLB2_LOG_ERR(\"dlb2 pf pmd could not lock page for device i/o\\n\");\n+\t\tgoto 
create_port_err;\n+\t}\n+\n+\n+\tmemset(port_base, 0, alloc_sz);\n+\n+\tret = dlb2_pf_create_ldb_port(&dlb2_dev->hw,\n+\t\t\t\t      handle->domain_id,\n+\t\t\t\t      cfg,\n+\t\t\t\t      cq_base,\n+\t\t\t\t      &response);\n+\tif (ret)\n+\t\tgoto create_port_err;\n+\n+\tpp_base = (uintptr_t)dlb2_dev->hw.func_kva + PP_BASE(is_dir);\n+\tdlb2_port[response.id][DLB2_LDB_PORT].pp_addr =\n+\t\t(void *)(uintptr_t)(pp_base + (PAGE_SIZE * response.id));\n+\n+\tdlb2_port[response.id][DLB2_LDB_PORT].cq_base =\n+\t\t(void *)(uintptr_t)(port_base);\n+\tmemset(&port_memory, 0, sizeof(port_memory));\n+\tdlb2_list_init_head(&port_memory.list);\n+\n+\t/* Fill out the per-port memory tracking structure */\n+\tdlb2_dev->ldb_port_pages[response.id].valid = true;\n+\tdlb2_list_splice(&port_memory.list,\n+\t\t\t &dlb2_dev->ldb_port_pages[response.id].list);\n+\n+\tcfg->response = response;\n+\n+\tDLB2_INFO(dev->dlb2_device, \"Exiting %s() with ret=%d\\n\",\n+\t\t  __func__, ret);\n+\n+create_port_err:\n+\n+\treturn ret;\n+}\n+\n+static int\n+dlb2_pf_dir_port_create(struct dlb2_hw_dev *handle,\n+\t\t\tstruct dlb2_create_dir_port_args *cfg,\n+\t\t\tenum dlb2_cq_poll_modes poll_mode)\n+{\n+\tstruct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;\n+\tstruct dlb2_cmd_response response = {0};\n+\tstruct dlb2_port_memory port_memory;\n+\tint ret;\n+\tuint8_t *port_base;\n+\tint alloc_sz, qe_sz;\n+\tphys_addr_t cq_base;\n+\tphys_addr_t pp_base;\n+\tint is_dir = true;\n+\n+\tDLB2_INFO(dev->dlb2_device, \"Entering %s()\\n\", __func__);\n+\n+\tif (poll_mode == DLB2_CQ_POLL_MODE_STD)\n+\t\tqe_sz = sizeof(struct dlb2_dequeue_qe);\n+\telse\n+\t\tqe_sz = RTE_CACHE_LINE_SIZE;\n+\n+\t/* Calculate the port memory required, and round up to the nearest\n+\t * cache line.\n+\t */\n+\talloc_sz = cfg->cq_depth * qe_sz;\n+\talloc_sz = RTE_CACHE_LINE_ROUNDUP(alloc_sz);\n+\n+\tport_base = dlb2_alloc_coherent_aligned(&cq_base,\n+\t\t\t\t\t\talloc_sz,\n+\t\t\t\t\t\tPAGE_SIZE);\n+\tif (port_base == NULL)\n+\t\treturn -ENOMEM;\n+\n+\t/* Lock the page in memory */\n+\tret = rte_mem_lock_page(port_base);\n+\tif (ret < 0) {\n+\t\tDLB2_LOG_ERR(\"dlb2 pf pmd could not lock page for device i/o\\n\");\n+\t\tgoto create_port_err;\n+\t}\n+\n+\tmemset(port_base, 0, alloc_sz);\n+\n+\tret = dlb2_pf_create_dir_port(&dlb2_dev->hw,\n+\t\t\t\t      handle->domain_id,\n+\t\t\t\t      cfg,\n+\t\t\t\t      cq_base,\n+\t\t\t\t      &response);\n+\tif (ret)\n+\t\tgoto create_port_err;\n+\n+\tpp_base = (uintptr_t)dlb2_dev->hw.func_kva + PP_BASE(is_dir);\n+\tdlb2_port[response.id][DLB2_DIR_PORT].pp_addr =\n+\t\t(void *)(uintptr_t)(pp_base + (PAGE_SIZE * response.id));\n+\n+\tdlb2_port[response.id][DLB2_DIR_PORT].cq_base =\n+\t\t(void *)(uintptr_t)(port_base);\n+\tmemset(&port_memory, 0, sizeof(port_memory));\n+\tdlb2_list_init_head(&port_memory.list);\n+\n+\t/* Fill out the per-port memory tracking structure */\n+\tdlb2_dev->dir_port_pages[response.id].valid = true;\n+\tdlb2_list_splice(&port_memory.list,\n+\t\t\t &dlb2_dev->dir_port_pages[response.id].list);\n+\n+\tcfg->response = response;\n+\n+\tDLB2_INFO(dev->dlb2_device, \"Exiting %s() with ret=%d\\n\",\n+\t\t  __func__, ret);\n+\n+create_port_err:\n+\n+\treturn ret;\n+}\n+\n static void\n dlb2_pf_iface_fn_ptrs_init(void)\n {\n@@ -247,6 +424,8 @@ dlb2_pf_iface_fn_ptrs_init(void)\n \tdlb2_iface_get_cq_poll_mode = dlb2_pf_get_cq_poll_mode;\n \tdlb2_iface_sched_domain_create = dlb2_pf_sched_domain_create;\n \tdlb2_iface_ldb_queue_create = dlb2_pf_ldb_queue_create;\n+\tdlb2_iface_ldb_port_create = 
dlb2_pf_ldb_port_create;\n+\tdlb2_iface_dir_port_create = dlb2_pf_dir_port_create;\n \tdlb2_iface_get_sn_allocation = dlb2_pf_get_sn_allocation;\n \tdlb2_iface_set_sn_allocation = dlb2_pf_set_sn_allocation;\n \tdlb2_iface_get_sn_occupancy = dlb2_pf_get_sn_occupancy;\n",
    "prefixes": [
        "11/22"
    ]
}
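
The PUT and PATCH methods listed in the Allow header require authentication and maintainer rights on the project. A hedged sketch of a partial update with PATCH, again using requests; the API token and the target state below are placeholders, not values taken from this response:

    import requests

    # PATCH is a partial update: only the fields present in the request
    # body change. Patchwork accepts token authentication in the form
    # "Authorization: Token <key>"; the key here is a hypothetical example.
    headers = {"Authorization": "Token 0123456789abcdef0123456789abcdef"}
    resp = requests.patch(
        "http://patches.dpdk.org/api/patches/77518/",
        headers=headers,
        json={"state": "accepted"},  # example state slug; this patch is currently "superseded"
    )
    resp.raise_for_status()
    print(resp.json()["state"])

PUT behaves the same way but replaces the full set of writable fields, so PATCH with only the fields of interest is usually the safer choice.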