> -----Original Message-----
> From: Bruce Richardson [mailto:bruce.richardson@intel.com]
> Sent: Friday, June 10, 2016 2:18 PM
> To: John Daley (johndale) <johndale@cisco.com>
> Cc: dev@dpdk.org; bruce.richardson@intel.com
> Subject: Re: [dpdk-dev] [PATCH v3 07/13] enic: use Tx completion messages
> instead of descriptors
>
> On Thu, Jun 02, 2016 at 05:22:51PM -0700, John Daley wrote:
> > The NIC can either DMA a separate completion message for each
> > completed send or periodically just DMA an index of the last completed
> > send. Switch to the second method which improves cache locality and
> > performance.
> >
> > Signed-off-by: John Daley <johndale@cisco.com>
>
> Can you perhaps send me an updated wording for this commit message as
> the title and commit message conflict. The title says to use completion
> messages not descriptors, while the body talks about moving away from a
> completion message way of working.
> Is the former method a descriptor writeback method, while the latter a head
> pointer writeback? If so, I think the title could be:
>
> "enic: use Tx head pointer not descriptor writeback"
>
> or something similar.
>
> Again, if you send on the updated commit text, I'll just update it on apply. I'd
> ideally like to get this patchset pushed to next-net first thing Monday.
Ok, I agree that it is confusing.

We moved from having the hardware send a completion descriptor for every packet to having it send the index of the last completed packet every once in a while. We can use the words 'index' and 'message' to describe the two methods and drop the word 'descriptor'. Here is a suggestion:

    enic: use Tx completion index instead of completion messages

    The NIC can either DMA a separate completion message for each completed
    send or periodically just DMA the index of the last completed send.
    Switch to the latter method, which improves cache locality and
    performance.
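To make the 'index' method concrete, here is a minimal standalone sketch of the idea. The names tx_queue and tx_queue_clean are illustrative only, not the driver's actual code; the real cleanup is enic_cleanup_wq() in the patch below:

    #include <stdint.h>
    #include <stdio.h>

    /* Host memory word the NIC periodically DMAs the last-completed
     * send index into (low 16 bits), instead of writing one
     * completion descriptor per transmitted packet. */
    struct tx_queue {
        volatile uint32_t *cq_msg;     /* per-queue DMA target */
        uint16_t last_completed_index; /* last index already cleaned */
    };

    /* Reclaim transmit buffers up to the index the NIC reported.
     * If the index has not moved, this is a single cached read and
     * a compare, which is where the performance win comes from. */
    static void tx_queue_clean(struct tx_queue *txq)
    {
        uint16_t completed = (uint16_t)(*txq->cq_msg & 0xffff);

        if (completed != txq->last_completed_index) {
            /* free buffers in (last_completed_index, completed] */
            printf("reclaim up to index %u\n", (unsigned)completed);
            txq->last_completed_index = completed;
        }
    }

    int main(void)
    {
        uint32_t dma_word = 0;            /* stands in for NIC DMA */
        struct tx_queue txq = { &dma_word, 0 };

        dma_word = 42;                    /* NIC reports index 42 */
        tx_queue_clean(&txq);             /* reclaims up to 42 */
        tx_queue_clean(&txq);             /* no-op: index unchanged */
        return 0;
    }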
Thank you,
John
>
> /Bruce
@@ -142,6 +142,7 @@ void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
vnic_wq_init_start(wq, cq_index, 0, 0,
error_interrupt_enable,
error_interrupt_offset);
+ wq->last_completed_index = 0;
}
void vnic_wq_error_out(struct vnic_wq *wq, unsigned int error)
@@ -38,6 +38,7 @@
#include "vnic_dev.h"
#include "vnic_cq.h"
+#include <rte_memzone.h>
/* Work queue control */
struct vnic_wq_ctrl {
@@ -79,6 +80,8 @@ struct vnic_wq {
unsigned int tail_idx;
unsigned int pkts_outstanding;
unsigned int socket_id;
+ const struct rte_memzone *cqmsg_rz; /* DMA target for completion index */
+ uint16_t last_completed_index; /* last index already cleaned up */
};
static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq)
@@ -97,7 +97,6 @@ enic_rxmbuf_queue_release(struct enic *enic, struct vnic_rq *rq)
}
}
-
void enic_set_hdr_split_size(struct enic *enic, u16 split_hdr_size)
{
vnic_set_hdr_split_size(enic->vdev, split_hdr_size);
@@ -235,12 +234,26 @@ void enic_init_vnic_resources(struct enic *enic)
unsigned int error_interrupt_enable = 1;
unsigned int error_interrupt_offset = 0;
unsigned int index = 0;
+ unsigned int cq_idx;
for (index = 0; index < enic->rq_count; index++) {
vnic_rq_init(&enic->rq[index],
enic_cq_rq(enic, index),
error_interrupt_enable,
error_interrupt_offset);
+
+ cq_idx = enic_cq_rq(enic, index);
+ vnic_cq_init(&enic->cq[cq_idx],
+ 0 /* flow_control_enable */,
+ 1 /* color_enable */,
+ 0 /* cq_head */,
+ 0 /* cq_tail */,
+ 1 /* cq_tail_color */,
+ 0 /* interrupt_enable */,
+ 1 /* cq_entry_enable */,
+ 0 /* cq_message_enable */,
+ 0 /* interrupt offset */,
+ 0 /* cq_message_addr */);
}
for (index = 0; index < enic->wq_count; index++) {
@@ -248,22 +261,19 @@ void enic_init_vnic_resources(struct enic *enic)
enic_cq_wq(enic, index),
error_interrupt_enable,
error_interrupt_offset);
- }
- vnic_dev_stats_clear(enic->vdev);
-
- for (index = 0; index < enic->cq_count; index++) {
- vnic_cq_init(&enic->cq[index],
+ cq_idx = enic_cq_wq(enic, index);
+ vnic_cq_init(&enic->cq[cq_idx],
0 /* flow_control_enable */,
1 /* color_enable */,
0 /* cq_head */,
0 /* cq_tail */,
1 /* cq_tail_color */,
0 /* interrupt_enable */,
- 1 /* cq_entry_enable */,
- 0 /* cq_message_enable */,
+ 0 /* cq_entry_enable */,
+ 1 /* cq_message_enable */,
0 /* interrupt offset */,
- 0 /* cq_message_addr */);
+ (u64)enic->wq[index].cqmsg_rz->phys_addr);
}
vnic_intr_init(&enic->intr,
@@ -507,6 +517,7 @@ void enic_free_wq(void *txq)
struct vnic_wq *wq = (struct vnic_wq *)txq;
struct enic *enic = vnic_dev_priv(wq->vdev);
+ rte_memzone_free(wq->cqmsg_rz);
vnic_wq_free(wq);
vnic_cq_free(&enic->cq[enic->rq_count + wq->index]);
}
@@ -517,6 +528,8 @@ int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
int err;
struct vnic_wq *wq = &enic->wq[queue_idx];
unsigned int cq_index = enic_cq_wq(enic, queue_idx);
+ char name[NAME_MAX];
+ static int instance;
wq->socket_id = socket_id;
if (nb_desc) {
@@ -552,6 +565,18 @@ int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
dev_err(enic, "error in allocation of cq for wq\n");
}
+ /* set up CQ message */
+ snprintf((char *)name, sizeof(name),
+ "vnic_cqmsg-%s-%d-%d", enic->bdf_name, queue_idx,
+ instance++);
+
+ wq->cqmsg_rz = rte_memzone_reserve_aligned((const char *)name,
+ sizeof(uint32_t),
+ SOCKET_ID_ANY, 0,
+ ENIC_ALIGN);
+ if (!wq->cqmsg_rz)
+ return -ENOMEM;
+
return err;
}
@@ -348,11 +348,14 @@ static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
unsigned int enic_cleanup_wq(struct enic *enic, struct vnic_wq *wq)
{
- unsigned int cq = enic_cq_wq(enic, wq->index);
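+ /* the NIC DMAs a 32-bit message; low 16 bits hold the completed index */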
+ u16 completed_index = *((uint32_t *)wq->cqmsg_rz->addr) & 0xffff;
- /* Return the work done */
- return vnic_cq_service(&enic->cq[cq],
- -1 /*wq_work_to_do*/, enic_wq_service, NULL);
+ if (wq->last_completed_index != completed_index) {
+ enic_wq_service(enic->vdev, NULL, 0, wq->index,
+ completed_index, NULL);
+ wq->last_completed_index = completed_index;
+ }
+ return 0;
}
void enic_post_wq_index(struct vnic_wq *wq)