@@ -757,11 +757,11 @@ show_port(void)
}
ret = rte_eth_dev_flow_ctrl_get(i, &fc_conf);
- if (ret == 0 && fc_conf.mode != RTE_FC_NONE) {
+ if (ret == 0 && fc_conf.mode != RTE_ETH_FC_NONE) {
printf("\t -- flow control mode %s%s high %u low %u pause %u%s%s\n",
- fc_conf.mode == RTE_FC_RX_PAUSE ? "rx " :
- fc_conf.mode == RTE_FC_TX_PAUSE ? "tx " :
- fc_conf.mode == RTE_FC_FULL ? "full" : "???",
+ fc_conf.mode == RTE_ETH_FC_RX_PAUSE ? "rx " :
+ fc_conf.mode == RTE_ETH_FC_TX_PAUSE ? "tx " :
+ fc_conf.mode == RTE_ETH_FC_FULL ? "full" : "???",
fc_conf.autoneg ? " auto" : "",
fc_conf.high_water,
fc_conf.low_water,
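
For reference, a minimal sketch (not part of this patch) of querying flow-control state with the renamed RTE_ETH_FC_* constants; the helper name is hypothetical and an already-initialized port is assumed:

#include <stdio.h>
#include <string.h>
#include <rte_ethdev.h>

static void
show_fc_mode(uint16_t port_id)
{
	struct rte_eth_fc_conf fc_conf;

	memset(&fc_conf, 0, sizeof(fc_conf));
	if (rte_eth_dev_flow_ctrl_get(port_id, &fc_conf) != 0)
		return;
	/* RTE_ETH_FC_FULL means pause frames are both sent and honoured */
	printf("port %u flow control mode: %d\n", port_id, (int)fc_conf.mode);
}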
@@ -668,13 +668,13 @@ perf_ethdev_setup(struct evt_test *test, struct evt_options *opt)
struct test_perf *t = evt_test_priv(test);
struct rte_eth_conf port_conf = {
.rxmode = {
- .mq_mode = ETH_MQ_RX_RSS,
+ .mq_mode = RTE_ETH_MQ_RX_RSS,
.split_hdr_size = 0,
},
.rx_adv_conf = {
.rss_conf = {
.rss_key = NULL,
- .rss_hf = ETH_RSS_IP,
+ .rss_hf = RTE_ETH_RSS_IP,
},
},
};
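
The same mq_mode/rss_hf pairing recurs throughout this series; a standalone sketch (assumed helper name, not from the patch) of requesting IP-based RSS with the new names:

#include <stddef.h>
#include <rte_ethdev.h>

static int
configure_ip_rss(uint16_t port_id, uint16_t nb_rxq)
{
	struct rte_eth_conf conf = {
		.rxmode = { .mq_mode = RTE_ETH_MQ_RX_RSS },
		.rx_adv_conf = {
			.rss_conf = {
				.rss_key = NULL,          /* use the PMD default key */
				.rss_hf = RTE_ETH_RSS_IP, /* hash on IPv4/IPv6 fields */
			},
		},
	};

	return rte_eth_dev_configure(port_id, nb_rxq, 1, &conf);
}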
@@ -176,12 +176,12 @@ pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt)
struct rte_eth_rxconf rx_conf;
struct rte_eth_conf port_conf = {
.rxmode = {
- .mq_mode = ETH_MQ_RX_RSS,
+ .mq_mode = RTE_ETH_MQ_RX_RSS,
},
.rx_adv_conf = {
.rss_conf = {
.rss_key = NULL,
- .rss_hf = ETH_RSS_IP,
+ .rss_hf = RTE_ETH_RSS_IP,
},
},
};
@@ -223,7 +223,7 @@ pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt)
if (!(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT))
local_port_conf.rxmode.offloads |=
- DEV_RX_OFFLOAD_RSS_HASH;
+ RTE_ETH_RX_OFFLOAD_RSS_HASH;
ret = rte_eth_dev_info_get(i, &dev_info);
if (ret != 0) {
@@ -233,9 +233,9 @@ pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt)
}
/* Enable mbuf fast free if PMD has the capability. */
- if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
local_port_conf.txmode.offloads |=
- DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
rx_conf = dev_info.default_rxconf;
rx_conf.offloads = port_conf.rxmode.offloads;
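
The hunk above is an instance of the usual offload-negotiation idiom: request an offload only after the PMD advertises it in dev_info. A compressed sketch (hypothetical helper name):

static void
enable_fast_free(struct rte_eth_conf *conf,
		 const struct rte_eth_dev_info *dev_info)
{
	/* silently skip the offload when the capability bit is absent */
	if (dev_info->tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
		conf->txmode.offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
}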
@@ -5,7 +5,7 @@
#define FLOW_ITEM_MASK(_x) (UINT64_C(1) << _x)
#define FLOW_ACTION_MASK(_x) (UINT64_C(1) << _x)
#define FLOW_ATTR_MASK(_x) (UINT64_C(1) << _x)
-#define GET_RSS_HF() (ETH_RSS_IP)
+#define GET_RSS_HF() (RTE_ETH_RSS_IP)
/* Configuration */
#define RXQ_NUM 4
@@ -70,16 +70,16 @@ struct app_params app = {
static struct rte_eth_conf port_conf = {
.rxmode = {
.split_hdr_size = 0,
- .offloads = DEV_RX_OFFLOAD_CHECKSUM,
+ .offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
},
.rx_adv_conf = {
.rss_conf = {
.rss_key = NULL,
- .rss_hf = ETH_RSS_IP,
+ .rss_hf = RTE_ETH_RSS_IP,
},
},
.txmode = {
- .mq_mode = ETH_MQ_TX_NONE,
+ .mq_mode = RTE_ETH_MQ_TX_NONE,
},
};
@@ -178,7 +178,7 @@ app_ports_check_link(void)
RTE_LOG(INFO, USER1, "Port %u %s\n",
port,
link_status_text);
- if (link.link_status == ETH_LINK_DOWN)
+ if (link.link_status == RTE_ETH_LINK_DOWN)
all_ports_up = 0;
}
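
RTE_ETH_LINK_DOWN is 0 and RTE_ETH_LINK_UP is 1, so link_status can be tested directly. A minimal sketch (assumed helper name, not part of the patch):

#include <rte_ethdev.h>

static int
port_is_up(uint16_t port_id)
{
	struct rte_eth_link link;

	/* non-blocking query; returns non-zero on error */
	if (rte_eth_link_get_nowait(port_id, &link) != 0)
		return 0;
	return link.link_status == RTE_ETH_LINK_UP;
}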
@@ -1478,51 +1478,51 @@ parse_and_check_speed_duplex(char *speedstr, char *duplexstr, uint32_t *speed)
int duplex;
if (!strcmp(duplexstr, "half")) {
- duplex = ETH_LINK_HALF_DUPLEX;
+ duplex = RTE_ETH_LINK_HALF_DUPLEX;
} else if (!strcmp(duplexstr, "full")) {
- duplex = ETH_LINK_FULL_DUPLEX;
+ duplex = RTE_ETH_LINK_FULL_DUPLEX;
} else if (!strcmp(duplexstr, "auto")) {
- duplex = ETH_LINK_FULL_DUPLEX;
+ duplex = RTE_ETH_LINK_FULL_DUPLEX;
} else {
fprintf(stderr, "Unknown duplex parameter\n");
return -1;
}
if (!strcmp(speedstr, "10")) {
- *speed = (duplex == ETH_LINK_HALF_DUPLEX) ?
- ETH_LINK_SPEED_10M_HD : ETH_LINK_SPEED_10M;
+ *speed = (duplex == RTE_ETH_LINK_HALF_DUPLEX) ?
+ RTE_ETH_LINK_SPEED_10M_HD : RTE_ETH_LINK_SPEED_10M;
} else if (!strcmp(speedstr, "100")) {
- *speed = (duplex == ETH_LINK_HALF_DUPLEX) ?
- ETH_LINK_SPEED_100M_HD : ETH_LINK_SPEED_100M;
+ *speed = (duplex == RTE_ETH_LINK_HALF_DUPLEX) ?
+ RTE_ETH_LINK_SPEED_100M_HD : RTE_ETH_LINK_SPEED_100M;
} else {
- if (duplex != ETH_LINK_FULL_DUPLEX) {
+ if (duplex != RTE_ETH_LINK_FULL_DUPLEX) {
fprintf(stderr, "Invalid speed/duplex parameters\n");
return -1;
}
if (!strcmp(speedstr, "1000")) {
- *speed = ETH_LINK_SPEED_1G;
+ *speed = RTE_ETH_LINK_SPEED_1G;
} else if (!strcmp(speedstr, "10000")) {
- *speed = ETH_LINK_SPEED_10G;
+ *speed = RTE_ETH_LINK_SPEED_10G;
} else if (!strcmp(speedstr, "25000")) {
- *speed = ETH_LINK_SPEED_25G;
+ *speed = RTE_ETH_LINK_SPEED_25G;
} else if (!strcmp(speedstr, "40000")) {
- *speed = ETH_LINK_SPEED_40G;
+ *speed = RTE_ETH_LINK_SPEED_40G;
} else if (!strcmp(speedstr, "50000")) {
- *speed = ETH_LINK_SPEED_50G;
+ *speed = RTE_ETH_LINK_SPEED_50G;
} else if (!strcmp(speedstr, "100000")) {
- *speed = ETH_LINK_SPEED_100G;
+ *speed = RTE_ETH_LINK_SPEED_100G;
} else if (!strcmp(speedstr, "200000")) {
- *speed = ETH_LINK_SPEED_200G;
+ *speed = RTE_ETH_LINK_SPEED_200G;
} else if (!strcmp(speedstr, "auto")) {
- *speed = ETH_LINK_SPEED_AUTONEG;
+ *speed = RTE_ETH_LINK_SPEED_AUTONEG;
} else {
fprintf(stderr, "Unknown speed parameter\n");
return -1;
}
}
- if (*speed != ETH_LINK_SPEED_AUTONEG)
- *speed |= ETH_LINK_SPEED_FIXED;
+ if (*speed != RTE_ETH_LINK_SPEED_AUTONEG)
+ *speed |= RTE_ETH_LINK_SPEED_FIXED;
return 0;
}
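
As the function above shows, a fixed speed is the speed bit OR-ed with RTE_ETH_LINK_SPEED_FIXED, while autonegotiation is the all-zero value. A one-line illustration (hypothetical helper):

static void
request_fixed_10g(struct rte_eth_conf *conf)
{
	conf->link_speeds = RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_FIXED;
}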
@@ -2166,33 +2166,33 @@ cmd_config_rss_parsed(void *parsed_result,
int ret;
if (!strcmp(res->value, "all"))
- rss_conf.rss_hf = ETH_RSS_ETH | ETH_RSS_VLAN | ETH_RSS_IP |
- ETH_RSS_TCP | ETH_RSS_UDP | ETH_RSS_SCTP |
- ETH_RSS_L2_PAYLOAD | ETH_RSS_L2TPV3 | ETH_RSS_ESP |
- ETH_RSS_AH | ETH_RSS_PFCP | ETH_RSS_GTPU |
- ETH_RSS_ECPRI;
+ rss_conf.rss_hf = RTE_ETH_RSS_ETH | RTE_ETH_RSS_VLAN | RTE_ETH_RSS_IP |
+ RTE_ETH_RSS_TCP | RTE_ETH_RSS_UDP | RTE_ETH_RSS_SCTP |
+ RTE_ETH_RSS_L2_PAYLOAD | RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_ESP |
+ RTE_ETH_RSS_AH | RTE_ETH_RSS_PFCP | RTE_ETH_RSS_GTPU |
+ RTE_ETH_RSS_ECPRI;
else if (!strcmp(res->value, "eth"))
- rss_conf.rss_hf = ETH_RSS_ETH;
+ rss_conf.rss_hf = RTE_ETH_RSS_ETH;
else if (!strcmp(res->value, "vlan"))
- rss_conf.rss_hf = ETH_RSS_VLAN;
+ rss_conf.rss_hf = RTE_ETH_RSS_VLAN;
else if (!strcmp(res->value, "ip"))
- rss_conf.rss_hf = ETH_RSS_IP;
+ rss_conf.rss_hf = RTE_ETH_RSS_IP;
else if (!strcmp(res->value, "udp"))
- rss_conf.rss_hf = ETH_RSS_UDP;
+ rss_conf.rss_hf = RTE_ETH_RSS_UDP;
else if (!strcmp(res->value, "tcp"))
- rss_conf.rss_hf = ETH_RSS_TCP;
+ rss_conf.rss_hf = RTE_ETH_RSS_TCP;
else if (!strcmp(res->value, "sctp"))
- rss_conf.rss_hf = ETH_RSS_SCTP;
+ rss_conf.rss_hf = RTE_ETH_RSS_SCTP;
else if (!strcmp(res->value, "ether"))
- rss_conf.rss_hf = ETH_RSS_L2_PAYLOAD;
+ rss_conf.rss_hf = RTE_ETH_RSS_L2_PAYLOAD;
else if (!strcmp(res->value, "port"))
- rss_conf.rss_hf = ETH_RSS_PORT;
+ rss_conf.rss_hf = RTE_ETH_RSS_PORT;
else if (!strcmp(res->value, "vxlan"))
- rss_conf.rss_hf = ETH_RSS_VXLAN;
+ rss_conf.rss_hf = RTE_ETH_RSS_VXLAN;
else if (!strcmp(res->value, "geneve"))
- rss_conf.rss_hf = ETH_RSS_GENEVE;
+ rss_conf.rss_hf = RTE_ETH_RSS_GENEVE;
else if (!strcmp(res->value, "nvgre"))
- rss_conf.rss_hf = ETH_RSS_NVGRE;
+ rss_conf.rss_hf = RTE_ETH_RSS_NVGRE;
else if (!strcmp(res->value, "l3-pre32"))
rss_conf.rss_hf = RTE_ETH_RSS_L3_PRE32;
else if (!strcmp(res->value, "l3-pre40"))
@@ -2206,46 +2206,46 @@ cmd_config_rss_parsed(void *parsed_result,
else if (!strcmp(res->value, "l3-pre96"))
rss_conf.rss_hf = RTE_ETH_RSS_L3_PRE96;
else if (!strcmp(res->value, "l3-src-only"))
- rss_conf.rss_hf = ETH_RSS_L3_SRC_ONLY;
+ rss_conf.rss_hf = RTE_ETH_RSS_L3_SRC_ONLY;
else if (!strcmp(res->value, "l3-dst-only"))
- rss_conf.rss_hf = ETH_RSS_L3_DST_ONLY;
+ rss_conf.rss_hf = RTE_ETH_RSS_L3_DST_ONLY;
else if (!strcmp(res->value, "l4-src-only"))
- rss_conf.rss_hf = ETH_RSS_L4_SRC_ONLY;
+ rss_conf.rss_hf = RTE_ETH_RSS_L4_SRC_ONLY;
else if (!strcmp(res->value, "l4-dst-only"))
- rss_conf.rss_hf = ETH_RSS_L4_DST_ONLY;
+ rss_conf.rss_hf = RTE_ETH_RSS_L4_DST_ONLY;
else if (!strcmp(res->value, "l2-src-only"))
- rss_conf.rss_hf = ETH_RSS_L2_SRC_ONLY;
+ rss_conf.rss_hf = RTE_ETH_RSS_L2_SRC_ONLY;
else if (!strcmp(res->value, "l2-dst-only"))
- rss_conf.rss_hf = ETH_RSS_L2_DST_ONLY;
+ rss_conf.rss_hf = RTE_ETH_RSS_L2_DST_ONLY;
else if (!strcmp(res->value, "l2tpv3"))
- rss_conf.rss_hf = ETH_RSS_L2TPV3;
+ rss_conf.rss_hf = RTE_ETH_RSS_L2TPV3;
else if (!strcmp(res->value, "esp"))
- rss_conf.rss_hf = ETH_RSS_ESP;
+ rss_conf.rss_hf = RTE_ETH_RSS_ESP;
else if (!strcmp(res->value, "ah"))
- rss_conf.rss_hf = ETH_RSS_AH;
+ rss_conf.rss_hf = RTE_ETH_RSS_AH;
else if (!strcmp(res->value, "pfcp"))
- rss_conf.rss_hf = ETH_RSS_PFCP;
+ rss_conf.rss_hf = RTE_ETH_RSS_PFCP;
else if (!strcmp(res->value, "pppoe"))
- rss_conf.rss_hf = ETH_RSS_PPPOE;
+ rss_conf.rss_hf = RTE_ETH_RSS_PPPOE;
else if (!strcmp(res->value, "gtpu"))
- rss_conf.rss_hf = ETH_RSS_GTPU;
+ rss_conf.rss_hf = RTE_ETH_RSS_GTPU;
else if (!strcmp(res->value, "ecpri"))
- rss_conf.rss_hf = ETH_RSS_ECPRI;
+ rss_conf.rss_hf = RTE_ETH_RSS_ECPRI;
else if (!strcmp(res->value, "mpls"))
- rss_conf.rss_hf = ETH_RSS_MPLS;
+ rss_conf.rss_hf = RTE_ETH_RSS_MPLS;
else if (!strcmp(res->value, "ipv4-chksum"))
- rss_conf.rss_hf = ETH_RSS_IPV4_CHKSUM;
+ rss_conf.rss_hf = RTE_ETH_RSS_IPV4_CHKSUM;
else if (!strcmp(res->value, "none"))
rss_conf.rss_hf = 0;
else if (!strcmp(res->value, "level-default")) {
- rss_hf &= (~ETH_RSS_LEVEL_MASK);
- rss_conf.rss_hf = (rss_hf | ETH_RSS_LEVEL_PMD_DEFAULT);
+ rss_hf &= (~RTE_ETH_RSS_LEVEL_MASK);
+ rss_conf.rss_hf = (rss_hf | RTE_ETH_RSS_LEVEL_PMD_DEFAULT);
} else if (!strcmp(res->value, "level-outer")) {
- rss_hf &= (~ETH_RSS_LEVEL_MASK);
- rss_conf.rss_hf = (rss_hf | ETH_RSS_LEVEL_OUTERMOST);
+ rss_hf &= (~RTE_ETH_RSS_LEVEL_MASK);
+ rss_conf.rss_hf = (rss_hf | RTE_ETH_RSS_LEVEL_OUTERMOST);
} else if (!strcmp(res->value, "level-inner")) {
- rss_hf &= (~ETH_RSS_LEVEL_MASK);
- rss_conf.rss_hf = (rss_hf | ETH_RSS_LEVEL_INNERMOST);
+ rss_hf &= (~RTE_ETH_RSS_LEVEL_MASK);
+ rss_conf.rss_hf = (rss_hf | RTE_ETH_RSS_LEVEL_INNERMOST);
} else if (!strcmp(res->value, "default"))
use_default = 1;
else if (isdigit(res->value[0]) && atoi(res->value) > 0 &&
@@ -2982,8 +2982,8 @@ parse_reta_config(const char *str,
return -1;
}
- idx = hash_index / RTE_RETA_GROUP_SIZE;
- shift = hash_index % RTE_RETA_GROUP_SIZE;
+ idx = hash_index / RTE_ETH_RETA_GROUP_SIZE;
+ shift = hash_index % RTE_ETH_RETA_GROUP_SIZE;
reta_conf[idx].mask |= (1ULL << shift);
reta_conf[idx].reta[shift] = nb_queue;
}
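
The idx/shift arithmetic above follows from RTE_ETH_RETA_GROUP_SIZE being 64: a RETA entry is addressed by a 64-entry group plus a bit position in that group's validity mask. A self-contained sketch (hypothetical helper; the result is applied with rte_eth_dev_rss_reta_update()):

#include <rte_ethdev.h>

static void
reta_set_one(struct rte_eth_rss_reta_entry64 *reta_conf,
	     uint16_t hash_index, uint16_t queue)
{
	uint16_t idx = hash_index / RTE_ETH_RETA_GROUP_SIZE;
	uint16_t shift = hash_index % RTE_ETH_RETA_GROUP_SIZE;

	reta_conf[idx].mask |= 1ULL << shift; /* mark this entry as valid */
	reta_conf[idx].reta[shift] = queue;
}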
@@ -3012,10 +3012,10 @@ cmd_set_rss_reta_parsed(void *parsed_result,
} else
printf("The reta size of port %d is %u\n",
res->port_id, dev_info.reta_size);
- if (dev_info.reta_size > ETH_RSS_RETA_SIZE_512) {
+ if (dev_info.reta_size > RTE_ETH_RSS_RETA_SIZE_512) {
fprintf(stderr,
"Currently do not support more than %u entries of redirection table\n",
- ETH_RSS_RETA_SIZE_512);
+ RTE_ETH_RSS_RETA_SIZE_512);
return;
}
@@ -3086,8 +3086,8 @@ showport_parse_reta_config(struct rte_eth_rss_reta_entry64 *conf,
char *end;
char *str_fld[8];
uint16_t i;
- uint16_t num = (nb_entries + RTE_RETA_GROUP_SIZE - 1) /
- RTE_RETA_GROUP_SIZE;
+ uint16_t num = (nb_entries + RTE_ETH_RETA_GROUP_SIZE - 1) /
+ RTE_ETH_RETA_GROUP_SIZE;
int ret;
p = strchr(p0, '(');
@@ -3132,7 +3132,7 @@ cmd_showport_reta_parsed(void *parsed_result,
if (ret != 0)
return;
- max_reta_size = RTE_MIN(dev_info.reta_size, ETH_RSS_RETA_SIZE_512);
+ max_reta_size = RTE_MIN(dev_info.reta_size, RTE_ETH_RSS_RETA_SIZE_512);
if (res->size == 0 || res->size > max_reta_size) {
fprintf(stderr, "Invalid redirection table size: %u (1-%u)\n",
res->size, max_reta_size);
@@ -3272,7 +3272,7 @@ cmd_config_dcb_parsed(void *parsed_result,
return;
}
- if ((res->num_tcs != ETH_4_TCS) && (res->num_tcs != ETH_8_TCS)) {
+ if ((res->num_tcs != RTE_ETH_4_TCS) && (res->num_tcs != RTE_ETH_8_TCS)) {
fprintf(stderr,
"The invalid number of traffic class, only 4 or 8 allowed.\n");
return;
@@ -4276,9 +4276,9 @@ cmd_vlan_tpid_parsed(void *parsed_result,
enum rte_vlan_type vlan_type;
if (!strcmp(res->vlan_type, "inner"))
- vlan_type = ETH_VLAN_TYPE_INNER;
+ vlan_type = RTE_ETH_VLAN_TYPE_INNER;
else if (!strcmp(res->vlan_type, "outer"))
- vlan_type = ETH_VLAN_TYPE_OUTER;
+ vlan_type = RTE_ETH_VLAN_TYPE_OUTER;
else {
fprintf(stderr, "Unknown vlan type\n");
return;
@@ -4615,55 +4615,55 @@ csum_show(int port_id)
printf("Parse tunnel is %s\n",
(ports[port_id].parse_tunnel) ? "on" : "off");
printf("IP checksum offload is %s\n",
- (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) ? "hw" : "sw");
+ (tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) ? "hw" : "sw");
printf("UDP checksum offload is %s\n",
- (tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) ? "hw" : "sw");
+ (tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) ? "hw" : "sw");
printf("TCP checksum offload is %s\n",
- (tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) ? "hw" : "sw");
+ (tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) ? "hw" : "sw");
printf("SCTP checksum offload is %s\n",
- (tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) ? "hw" : "sw");
+ (tx_offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM) ? "hw" : "sw");
printf("Outer-Ip checksum offload is %s\n",
- (tx_offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ? "hw" : "sw");
+ (tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) ? "hw" : "sw");
printf("Outer-Udp checksum offload is %s\n",
- (tx_offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) ? "hw" : "sw");
+ (tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM) ? "hw" : "sw");
/* display warnings if configuration is not supported by the NIC */
ret = eth_dev_info_get_print_err(port_id, &dev_info);
if (ret != 0)
return;
- if ((tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) &&
- (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) == 0) {
+ if ((tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) &&
+ (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) == 0) {
fprintf(stderr,
"Warning: hardware IP checksum enabled but not supported by port %d\n",
port_id);
}
- if ((tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) &&
- (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) == 0) {
+ if ((tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) &&
+ (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) == 0) {
fprintf(stderr,
"Warning: hardware UDP checksum enabled but not supported by port %d\n",
port_id);
}
- if ((tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) &&
- (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) == 0) {
+ if ((tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) &&
+ (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) == 0) {
fprintf(stderr,
"Warning: hardware TCP checksum enabled but not supported by port %d\n",
port_id);
}
- if ((tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) &&
- (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SCTP_CKSUM) == 0) {
+ if ((tx_offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM) &&
+ (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM) == 0) {
fprintf(stderr,
"Warning: hardware SCTP checksum enabled but not supported by port %d\n",
port_id);
}
- if ((tx_offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) &&
- (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) == 0) {
+ if ((tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) &&
+ (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) == 0) {
fprintf(stderr,
"Warning: hardware outer IP checksum enabled but not supported by port %d\n",
port_id);
}
- if ((tx_offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) &&
- (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
+ if ((tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM) &&
+ (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
== 0) {
fprintf(stderr,
"Warning: hardware outer UDP checksum enabled but not supported by port %d\n",
@@ -4713,8 +4713,8 @@ cmd_csum_parsed(void *parsed_result,
if (!strcmp(res->proto, "ip")) {
if (hw == 0 || (dev_info.tx_offload_capa &
- DEV_TX_OFFLOAD_IPV4_CKSUM)) {
- csum_offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM;
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)) {
+ csum_offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
} else {
fprintf(stderr,
"IP checksum offload is not supported by port %u\n",
@@ -4722,8 +4722,8 @@ cmd_csum_parsed(void *parsed_result,
}
} else if (!strcmp(res->proto, "udp")) {
if (hw == 0 || (dev_info.tx_offload_capa &
- DEV_TX_OFFLOAD_UDP_CKSUM)) {
- csum_offloads |= DEV_TX_OFFLOAD_UDP_CKSUM;
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM)) {
+ csum_offloads |= RTE_ETH_TX_OFFLOAD_UDP_CKSUM;
} else {
fprintf(stderr,
"UDP checksum offload is not supported by port %u\n",
@@ -4731,8 +4731,8 @@ cmd_csum_parsed(void *parsed_result,
}
} else if (!strcmp(res->proto, "tcp")) {
if (hw == 0 || (dev_info.tx_offload_capa &
- DEV_TX_OFFLOAD_TCP_CKSUM)) {
- csum_offloads |= DEV_TX_OFFLOAD_TCP_CKSUM;
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM)) {
+ csum_offloads |= RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
} else {
fprintf(stderr,
"TCP checksum offload is not supported by port %u\n",
@@ -4740,8 +4740,8 @@ cmd_csum_parsed(void *parsed_result,
}
} else if (!strcmp(res->proto, "sctp")) {
if (hw == 0 || (dev_info.tx_offload_capa &
- DEV_TX_OFFLOAD_SCTP_CKSUM)) {
- csum_offloads |= DEV_TX_OFFLOAD_SCTP_CKSUM;
+ RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)) {
+ csum_offloads |= RTE_ETH_TX_OFFLOAD_SCTP_CKSUM;
} else {
fprintf(stderr,
"SCTP checksum offload is not supported by port %u\n",
@@ -4749,9 +4749,9 @@ cmd_csum_parsed(void *parsed_result,
}
} else if (!strcmp(res->proto, "outer-ip")) {
if (hw == 0 || (dev_info.tx_offload_capa &
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
+ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
csum_offloads |=
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM;
} else {
fprintf(stderr,
"Outer IP checksum offload is not supported by port %u\n",
@@ -4759,9 +4759,9 @@ cmd_csum_parsed(void *parsed_result,
}
} else if (!strcmp(res->proto, "outer-udp")) {
if (hw == 0 || (dev_info.tx_offload_capa &
- DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)) {
+ RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)) {
csum_offloads |=
- DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
+ RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM;
} else {
fprintf(stderr,
"Outer UDP checksum offload is not supported by port %u\n",
@@ -4916,7 +4916,7 @@ cmd_tso_set_parsed(void *parsed_result,
return;
if ((ports[res->port_id].tso_segsz != 0) &&
- (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) == 0) {
+ (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_TCP_TSO) == 0) {
fprintf(stderr, "Error: TSO is not supported by port %d\n",
res->port_id);
return;
@@ -4924,11 +4924,11 @@ cmd_tso_set_parsed(void *parsed_result,
if (ports[res->port_id].tso_segsz == 0) {
ports[res->port_id].dev_conf.txmode.offloads &=
- ~DEV_TX_OFFLOAD_TCP_TSO;
+ ~RTE_ETH_TX_OFFLOAD_TCP_TSO;
printf("TSO for non-tunneled packets is disabled\n");
} else {
ports[res->port_id].dev_conf.txmode.offloads |=
- DEV_TX_OFFLOAD_TCP_TSO;
+ RTE_ETH_TX_OFFLOAD_TCP_TSO;
printf("TSO segment size for non-tunneled packets is %d\n",
ports[res->port_id].tso_segsz);
}
@@ -4940,7 +4940,7 @@ cmd_tso_set_parsed(void *parsed_result,
return;
if ((ports[res->port_id].tso_segsz != 0) &&
- (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) == 0) {
+ (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_TCP_TSO) == 0) {
fprintf(stderr,
"Warning: TSO enabled but not supported by port %d\n",
res->port_id);
@@ -5011,27 +5011,27 @@ check_tunnel_tso_nic_support(portid_t port_id)
if (eth_dev_info_get_print_err(port_id, &dev_info) != 0)
return dev_info;
- if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO))
+ if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO))
fprintf(stderr,
"Warning: VXLAN TUNNEL TSO not supported therefore not enabled for port %d\n",
port_id);
- if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GRE_TNL_TSO))
+ if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO))
fprintf(stderr,
"Warning: GRE TUNNEL TSO not supported therefore not enabled for port %d\n",
port_id);
- if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPIP_TNL_TSO))
+ if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO))
fprintf(stderr,
"Warning: IPIP TUNNEL TSO not supported therefore not enabled for port %d\n",
port_id);
- if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GENEVE_TNL_TSO))
+ if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO))
fprintf(stderr,
"Warning: GENEVE TUNNEL TSO not supported therefore not enabled for port %d\n",
port_id);
- if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IP_TNL_TSO))
+ if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IP_TNL_TSO))
fprintf(stderr,
"Warning: IP TUNNEL TSO not supported therefore not enabled for port %d\n",
port_id);
- if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TNL_TSO))
+ if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO))
fprintf(stderr,
"Warning: UDP TUNNEL TSO not supported therefore not enabled for port %d\n",
port_id);
@@ -5059,20 +5059,20 @@ cmd_tunnel_tso_set_parsed(void *parsed_result,
dev_info = check_tunnel_tso_nic_support(res->port_id);
if (ports[res->port_id].tunnel_tso_segsz == 0) {
ports[res->port_id].dev_conf.txmode.offloads &=
- ~(DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
- DEV_TX_OFFLOAD_GRE_TNL_TSO |
- DEV_TX_OFFLOAD_IPIP_TNL_TSO |
- DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
- DEV_TX_OFFLOAD_IP_TNL_TSO |
- DEV_TX_OFFLOAD_UDP_TNL_TSO);
+ ~(RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO);
printf("TSO for tunneled packets is disabled\n");
} else {
- uint64_t tso_offloads = (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
- DEV_TX_OFFLOAD_GRE_TNL_TSO |
- DEV_TX_OFFLOAD_IPIP_TNL_TSO |
- DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
- DEV_TX_OFFLOAD_IP_TNL_TSO |
- DEV_TX_OFFLOAD_UDP_TNL_TSO);
+ uint64_t tso_offloads = (RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO);
ports[res->port_id].dev_conf.txmode.offloads |=
(tso_offloads & dev_info.tx_offload_capa);
@@ -5095,7 +5095,7 @@ cmd_tunnel_tso_set_parsed(void *parsed_result,
fprintf(stderr,
"Warning: csum parse_tunnel must be set so that tunneled packets are recognized\n");
if (!(ports[res->port_id].dev_conf.txmode.offloads &
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM))
+ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM))
fprintf(stderr,
"Warning: csum set outer-ip must be set to hw if outer L3 is IPv4; not necessary for IPv6\n");
}
@@ -7227,9 +7227,9 @@ cmd_link_flow_ctrl_show_parsed(void *parsed_result,
return;
}
- if (fc_conf.mode == RTE_FC_RX_PAUSE || fc_conf.mode == RTE_FC_FULL)
+ if (fc_conf.mode == RTE_ETH_FC_RX_PAUSE || fc_conf.mode == RTE_ETH_FC_FULL)
rx_fc_en = true;
- if (fc_conf.mode == RTE_FC_TX_PAUSE || fc_conf.mode == RTE_FC_FULL)
+ if (fc_conf.mode == RTE_ETH_FC_TX_PAUSE || fc_conf.mode == RTE_ETH_FC_FULL)
tx_fc_en = true;
printf("\n%s Flow control infos for port %-2d %s\n",
@@ -7507,12 +7507,12 @@ cmd_link_flow_ctrl_set_parsed(void *parsed_result,
/*
* Rx on/off, flow control is enabled/disabled on RX side. This can indicate
- * the RTE_FC_TX_PAUSE, Transmit pause frame at the Rx side.
+ * the RTE_ETH_FC_TX_PAUSE mode: transmit pause frames at the Rx side.
* Tx on/off, flow control is enabled/disabled on TX side. This can indicate
- * the RTE_FC_RX_PAUSE, Respond to the pause frame at the Tx side.
+ * the RTE_ETH_FC_RX_PAUSE mode: respond to pause frames at the Tx side.
*/
static enum rte_eth_fc_mode rx_tx_onoff_2_lfc_mode[2][2] = {
- {RTE_FC_NONE, RTE_FC_TX_PAUSE}, {RTE_FC_RX_PAUSE, RTE_FC_FULL}
+ {RTE_ETH_FC_NONE, RTE_ETH_FC_TX_PAUSE}, {RTE_ETH_FC_RX_PAUSE, RTE_ETH_FC_FULL}
};
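
The table collapses the two booleans into a single mode in one lookup, indexed as [rx_fc_en][tx_fc_en], which is how the function consumes it later on:

	fc_conf.mode = rx_tx_onoff_2_lfc_mode[rx_fc_en][tx_fc_en];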
/* Partial command line, retrieve current configuration */
@@ -7525,11 +7525,11 @@ cmd_link_flow_ctrl_set_parsed(void *parsed_result,
return;
}
- if ((fc_conf.mode == RTE_FC_RX_PAUSE) ||
- (fc_conf.mode == RTE_FC_FULL))
+ if ((fc_conf.mode == RTE_ETH_FC_RX_PAUSE) ||
+ (fc_conf.mode == RTE_ETH_FC_FULL))
rx_fc_en = 1;
- if ((fc_conf.mode == RTE_FC_TX_PAUSE) ||
- (fc_conf.mode == RTE_FC_FULL))
+ if ((fc_conf.mode == RTE_ETH_FC_TX_PAUSE) ||
+ (fc_conf.mode == RTE_ETH_FC_FULL))
tx_fc_en = 1;
}
@@ -7597,12 +7597,12 @@ cmd_priority_flow_ctrl_set_parsed(void *parsed_result,
/*
* Rx on/off, flow control is enabled/disabled on RX side. This can indicate
- * the RTE_FC_TX_PAUSE, Transmit pause frame at the Rx side.
+ * the RTE_ETH_FC_TX_PAUSE mode: transmit pause frames at the Rx side.
* Tx on/off, flow control is enabled/disabled on TX side. This can indicate
- * the RTE_FC_RX_PAUSE, Respond to the pause frame at the Tx side.
+ * the RTE_ETH_FC_RX_PAUSE mode: respond to pause frames at the Tx side.
*/
static enum rte_eth_fc_mode rx_tx_onoff_2_pfc_mode[2][2] = {
- {RTE_FC_NONE, RTE_FC_TX_PAUSE}, {RTE_FC_RX_PAUSE, RTE_FC_FULL}
+ {RTE_ETH_FC_NONE, RTE_ETH_FC_TX_PAUSE}, {RTE_ETH_FC_RX_PAUSE, RTE_ETH_FC_FULL}
};
memset(&pfc_conf, 0, sizeof(struct rte_eth_pfc_conf));
@@ -9250,13 +9250,13 @@ cmd_set_vf_rxmode_parsed(void *parsed_result,
int is_on = (strcmp(res->on, "on") == 0) ? 1 : 0;
if (!strcmp(res->what,"rxmode")) {
if (!strcmp(res->mode, "AUPE"))
- vf_rxmode |= ETH_VMDQ_ACCEPT_UNTAG;
+ vf_rxmode |= RTE_ETH_VMDQ_ACCEPT_UNTAG;
else if (!strcmp(res->mode, "ROPE"))
- vf_rxmode |= ETH_VMDQ_ACCEPT_HASH_UC;
+ vf_rxmode |= RTE_ETH_VMDQ_ACCEPT_HASH_UC;
else if (!strcmp(res->mode, "BAM"))
- vf_rxmode |= ETH_VMDQ_ACCEPT_BROADCAST;
+ vf_rxmode |= RTE_ETH_VMDQ_ACCEPT_BROADCAST;
else if (!strncmp(res->mode, "MPE",3))
- vf_rxmode |= ETH_VMDQ_ACCEPT_MULTICAST;
+ vf_rxmode |= RTE_ETH_VMDQ_ACCEPT_MULTICAST;
}
RTE_SET_USED(is_on);
@@ -9656,7 +9656,7 @@ cmd_tunnel_udp_config_parsed(void *parsed_result,
int ret;
tunnel_udp.udp_port = res->udp_port;
- tunnel_udp.prot_type = RTE_TUNNEL_TYPE_VXLAN;
+ tunnel_udp.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN;
if (!strcmp(res->what, "add"))
ret = rte_eth_dev_udp_tunnel_port_add(res->port_id,
@@ -9722,13 +9722,13 @@ cmd_cfg_tunnel_udp_port_parsed(void *parsed_result,
tunnel_udp.udp_port = res->udp_port;
if (!strcmp(res->tunnel_type, "vxlan")) {
- tunnel_udp.prot_type = RTE_TUNNEL_TYPE_VXLAN;
+ tunnel_udp.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN;
} else if (!strcmp(res->tunnel_type, "geneve")) {
- tunnel_udp.prot_type = RTE_TUNNEL_TYPE_GENEVE;
+ tunnel_udp.prot_type = RTE_ETH_TUNNEL_TYPE_GENEVE;
} else if (!strcmp(res->tunnel_type, "vxlan-gpe")) {
- tunnel_udp.prot_type = RTE_TUNNEL_TYPE_VXLAN_GPE;
+ tunnel_udp.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN_GPE;
} else if (!strcmp(res->tunnel_type, "ecpri")) {
- tunnel_udp.prot_type = RTE_TUNNEL_TYPE_ECPRI;
+ tunnel_udp.prot_type = RTE_ETH_TUNNEL_TYPE_ECPRI;
} else {
fprintf(stderr, "Invalid tunnel type\n");
return;
@@ -11859,7 +11859,7 @@ cmd_set_macsec_offload_on_parsed(
if (ret != 0)
return;
- if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MACSEC_INSERT) {
+ if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MACSEC_INSERT) {
#ifdef RTE_NET_IXGBE
ret = rte_pmd_ixgbe_macsec_enable(port_id, en, rp);
#endif
@@ -11870,7 +11870,7 @@ cmd_set_macsec_offload_on_parsed(
switch (ret) {
case 0:
ports[port_id].dev_conf.txmode.offloads |=
- DEV_TX_OFFLOAD_MACSEC_INSERT;
+ RTE_ETH_TX_OFFLOAD_MACSEC_INSERT;
cmd_reconfig_device_queue(port_id, 1, 1);
break;
case -ENODEV:
@@ -11956,7 +11956,7 @@ cmd_set_macsec_offload_off_parsed(
if (ret != 0)
return;
- if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MACSEC_INSERT) {
+ if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MACSEC_INSERT) {
#ifdef RTE_NET_IXGBE
ret = rte_pmd_ixgbe_macsec_disable(port_id);
#endif
@@ -11964,7 +11964,7 @@ cmd_set_macsec_offload_off_parsed(
switch (ret) {
case 0:
ports[port_id].dev_conf.txmode.offloads &=
- ~DEV_TX_OFFLOAD_MACSEC_INSERT;
+ ~RTE_ETH_TX_OFFLOAD_MACSEC_INSERT;
cmd_reconfig_device_queue(port_id, 1, 1);
break;
case -ENODEV:
@@ -86,62 +86,62 @@ static const struct {
};
const struct rss_type_info rss_type_table[] = {
- { "all", ETH_RSS_ETH | ETH_RSS_VLAN | ETH_RSS_IP | ETH_RSS_TCP |
- ETH_RSS_UDP | ETH_RSS_SCTP | ETH_RSS_L2_PAYLOAD |
- ETH_RSS_L2TPV3 | ETH_RSS_ESP | ETH_RSS_AH | ETH_RSS_PFCP |
- ETH_RSS_GTPU | ETH_RSS_ECPRI | ETH_RSS_MPLS},
+ { "all", RTE_ETH_RSS_ETH | RTE_ETH_RSS_VLAN | RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP |
+ RTE_ETH_RSS_UDP | RTE_ETH_RSS_SCTP | RTE_ETH_RSS_L2_PAYLOAD |
+ RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_ESP | RTE_ETH_RSS_AH | RTE_ETH_RSS_PFCP |
+ RTE_ETH_RSS_GTPU | RTE_ETH_RSS_ECPRI | RTE_ETH_RSS_MPLS},
{ "none", 0 },
- { "eth", ETH_RSS_ETH },
- { "l2-src-only", ETH_RSS_L2_SRC_ONLY },
- { "l2-dst-only", ETH_RSS_L2_DST_ONLY },
- { "vlan", ETH_RSS_VLAN },
- { "s-vlan", ETH_RSS_S_VLAN },
- { "c-vlan", ETH_RSS_C_VLAN },
- { "ipv4", ETH_RSS_IPV4 },
- { "ipv4-frag", ETH_RSS_FRAG_IPV4 },
- { "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP },
- { "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP },
- { "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP },
- { "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER },
- { "ipv6", ETH_RSS_IPV6 },
- { "ipv6-frag", ETH_RSS_FRAG_IPV6 },
- { "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP },
- { "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP },
- { "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP },
- { "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER },
- { "l2-payload", ETH_RSS_L2_PAYLOAD },
- { "ipv6-ex", ETH_RSS_IPV6_EX },
- { "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX },
- { "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX },
- { "port", ETH_RSS_PORT },
- { "vxlan", ETH_RSS_VXLAN },
- { "geneve", ETH_RSS_GENEVE },
- { "nvgre", ETH_RSS_NVGRE },
- { "ip", ETH_RSS_IP },
- { "udp", ETH_RSS_UDP },
- { "tcp", ETH_RSS_TCP },
- { "sctp", ETH_RSS_SCTP },
- { "tunnel", ETH_RSS_TUNNEL },
+ { "eth", RTE_ETH_RSS_ETH },
+ { "l2-src-only", RTE_ETH_RSS_L2_SRC_ONLY },
+ { "l2-dst-only", RTE_ETH_RSS_L2_DST_ONLY },
+ { "vlan", RTE_ETH_RSS_VLAN },
+ { "s-vlan", RTE_ETH_RSS_S_VLAN },
+ { "c-vlan", RTE_ETH_RSS_C_VLAN },
+ { "ipv4", RTE_ETH_RSS_IPV4 },
+ { "ipv4-frag", RTE_ETH_RSS_FRAG_IPV4 },
+ { "ipv4-tcp", RTE_ETH_RSS_NONFRAG_IPV4_TCP },
+ { "ipv4-udp", RTE_ETH_RSS_NONFRAG_IPV4_UDP },
+ { "ipv4-sctp", RTE_ETH_RSS_NONFRAG_IPV4_SCTP },
+ { "ipv4-other", RTE_ETH_RSS_NONFRAG_IPV4_OTHER },
+ { "ipv6", RTE_ETH_RSS_IPV6 },
+ { "ipv6-frag", RTE_ETH_RSS_FRAG_IPV6 },
+ { "ipv6-tcp", RTE_ETH_RSS_NONFRAG_IPV6_TCP },
+ { "ipv6-udp", RTE_ETH_RSS_NONFRAG_IPV6_UDP },
+ { "ipv6-sctp", RTE_ETH_RSS_NONFRAG_IPV6_SCTP },
+ { "ipv6-other", RTE_ETH_RSS_NONFRAG_IPV6_OTHER },
+ { "l2-payload", RTE_ETH_RSS_L2_PAYLOAD },
+ { "ipv6-ex", RTE_ETH_RSS_IPV6_EX },
+ { "ipv6-tcp-ex", RTE_ETH_RSS_IPV6_TCP_EX },
+ { "ipv6-udp-ex", RTE_ETH_RSS_IPV6_UDP_EX },
+ { "port", RTE_ETH_RSS_PORT },
+ { "vxlan", RTE_ETH_RSS_VXLAN },
+ { "geneve", RTE_ETH_RSS_GENEVE },
+ { "nvgre", RTE_ETH_RSS_NVGRE },
+ { "ip", RTE_ETH_RSS_IP },
+ { "udp", RTE_ETH_RSS_UDP },
+ { "tcp", RTE_ETH_RSS_TCP },
+ { "sctp", RTE_ETH_RSS_SCTP },
+ { "tunnel", RTE_ETH_RSS_TUNNEL },
{ "l3-pre32", RTE_ETH_RSS_L3_PRE32 },
{ "l3-pre40", RTE_ETH_RSS_L3_PRE40 },
{ "l3-pre48", RTE_ETH_RSS_L3_PRE48 },
{ "l3-pre56", RTE_ETH_RSS_L3_PRE56 },
{ "l3-pre64", RTE_ETH_RSS_L3_PRE64 },
{ "l3-pre96", RTE_ETH_RSS_L3_PRE96 },
- { "l3-src-only", ETH_RSS_L3_SRC_ONLY },
- { "l3-dst-only", ETH_RSS_L3_DST_ONLY },
- { "l4-src-only", ETH_RSS_L4_SRC_ONLY },
- { "l4-dst-only", ETH_RSS_L4_DST_ONLY },
- { "esp", ETH_RSS_ESP },
- { "ah", ETH_RSS_AH },
- { "l2tpv3", ETH_RSS_L2TPV3 },
- { "pfcp", ETH_RSS_PFCP },
- { "pppoe", ETH_RSS_PPPOE },
- { "gtpu", ETH_RSS_GTPU },
- { "ecpri", ETH_RSS_ECPRI },
- { "mpls", ETH_RSS_MPLS },
- { "ipv4-chksum", ETH_RSS_IPV4_CHKSUM },
- { "l4-chksum", ETH_RSS_L4_CHKSUM },
+ { "l3-src-only", RTE_ETH_RSS_L3_SRC_ONLY },
+ { "l3-dst-only", RTE_ETH_RSS_L3_DST_ONLY },
+ { "l4-src-only", RTE_ETH_RSS_L4_SRC_ONLY },
+ { "l4-dst-only", RTE_ETH_RSS_L4_DST_ONLY },
+ { "esp", RTE_ETH_RSS_ESP },
+ { "ah", RTE_ETH_RSS_AH },
+ { "l2tpv3", RTE_ETH_RSS_L2TPV3 },
+ { "pfcp", RTE_ETH_RSS_PFCP },
+ { "pppoe", RTE_ETH_RSS_PPPOE },
+ { "gtpu", RTE_ETH_RSS_GTPU },
+ { "ecpri", RTE_ETH_RSS_ECPRI },
+ { "mpls", RTE_ETH_RSS_MPLS },
+ { "ipv4-chksum", RTE_ETH_RSS_IPV4_CHKSUM },
+ { "l4-chksum", RTE_ETH_RSS_L4_CHKSUM },
{ NULL, 0 },
};
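
A lookup sketch over this table (assuming the str/rss_type member names; returns 0 for an unknown name):

#include <string.h>

static uint64_t
rss_flag_from_name(const char *name)
{
	unsigned int i;

	/* the table is NULL-terminated, as its last entry above shows */
	for (i = 0; rss_type_table[i].str != NULL; i++)
		if (strcmp(rss_type_table[i].str, name) == 0)
			return rss_type_table[i].rss_type;
	return 0;
}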
@@ -538,39 +538,39 @@ static void
device_infos_display_speeds(uint32_t speed_capa)
{
printf("\n\tDevice speed capability:");
- if (speed_capa == ETH_LINK_SPEED_AUTONEG)
+ if (speed_capa == RTE_ETH_LINK_SPEED_AUTONEG)
printf(" Autonegotiate (all speeds)");
- if (speed_capa & ETH_LINK_SPEED_FIXED)
+ if (speed_capa & RTE_ETH_LINK_SPEED_FIXED)
printf(" Disable autonegotiate (fixed speed) ");
- if (speed_capa & ETH_LINK_SPEED_10M_HD)
+ if (speed_capa & RTE_ETH_LINK_SPEED_10M_HD)
printf(" 10 Mbps half-duplex ");
- if (speed_capa & ETH_LINK_SPEED_10M)
+ if (speed_capa & RTE_ETH_LINK_SPEED_10M)
printf(" 10 Mbps full-duplex ");
- if (speed_capa & ETH_LINK_SPEED_100M_HD)
+ if (speed_capa & RTE_ETH_LINK_SPEED_100M_HD)
printf(" 100 Mbps half-duplex ");
- if (speed_capa & ETH_LINK_SPEED_100M)
+ if (speed_capa & RTE_ETH_LINK_SPEED_100M)
printf(" 100 Mbps full-duplex ");
- if (speed_capa & ETH_LINK_SPEED_1G)
+ if (speed_capa & RTE_ETH_LINK_SPEED_1G)
printf(" 1 Gbps ");
- if (speed_capa & ETH_LINK_SPEED_2_5G)
+ if (speed_capa & RTE_ETH_LINK_SPEED_2_5G)
printf(" 2.5 Gbps ");
- if (speed_capa & ETH_LINK_SPEED_5G)
+ if (speed_capa & RTE_ETH_LINK_SPEED_5G)
printf(" 5 Gbps ");
- if (speed_capa & ETH_LINK_SPEED_10G)
+ if (speed_capa & RTE_ETH_LINK_SPEED_10G)
printf(" 10 Gbps ");
- if (speed_capa & ETH_LINK_SPEED_20G)
+ if (speed_capa & RTE_ETH_LINK_SPEED_20G)
printf(" 20 Gbps ");
- if (speed_capa & ETH_LINK_SPEED_25G)
+ if (speed_capa & RTE_ETH_LINK_SPEED_25G)
printf(" 25 Gbps ");
- if (speed_capa & ETH_LINK_SPEED_40G)
+ if (speed_capa & RTE_ETH_LINK_SPEED_40G)
printf(" 40 Gbps ");
- if (speed_capa & ETH_LINK_SPEED_50G)
+ if (speed_capa & RTE_ETH_LINK_SPEED_50G)
printf(" 50 Gbps ");
- if (speed_capa & ETH_LINK_SPEED_56G)
+ if (speed_capa & RTE_ETH_LINK_SPEED_56G)
printf(" 56 Gbps ");
- if (speed_capa & ETH_LINK_SPEED_100G)
+ if (speed_capa & RTE_ETH_LINK_SPEED_100G)
printf(" 100 Gbps ");
- if (speed_capa & ETH_LINK_SPEED_200G)
+ if (speed_capa & RTE_ETH_LINK_SPEED_200G)
printf(" 200 Gbps ");
}
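
A note as code (illustrative): RTE_ETH_LINK_SPEED_AUTONEG is defined as 0, which is why it is matched with == above while every concrete speed is a bit tested with &:

_Static_assert(RTE_ETH_LINK_SPEED_AUTONEG == 0,
	       "autoneg is the absence of any fixed-speed bit");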
@@ -723,9 +723,9 @@ port_infos_display(portid_t port_id)
printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down"));
printf("Link speed: %s\n", rte_eth_link_speed_to_str(link.link_speed));
- printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
+ printf("Link duplex: %s\n", (link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex"));
- printf("Autoneg status: %s\n", (link.link_autoneg == ETH_LINK_AUTONEG) ?
+ printf("Autoneg status: %s\n", (link.link_autoneg == RTE_ETH_LINK_AUTONEG) ?
("On") : ("Off"));
if (!rte_eth_dev_get_mtu(port_id, &mtu))
@@ -743,22 +743,22 @@ port_infos_display(portid_t port_id)
vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
if (vlan_offload >= 0){
printf("VLAN offload: \n");
- if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD)
+ if (vlan_offload & RTE_ETH_VLAN_STRIP_OFFLOAD)
printf(" strip on, ");
else
printf(" strip off, ");
- if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD)
+ if (vlan_offload & RTE_ETH_VLAN_FILTER_OFFLOAD)
printf("filter on, ");
else
printf("filter off, ");
- if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)
+ if (vlan_offload & RTE_ETH_VLAN_EXTEND_OFFLOAD)
printf("extend on, ");
else
printf("extend off, ");
- if (vlan_offload & ETH_QINQ_STRIP_OFFLOAD)
+ if (vlan_offload & RTE_ETH_QINQ_STRIP_OFFLOAD)
printf("qinq strip on\n");
else
printf("qinq strip off\n");
@@ -2953,8 +2953,8 @@ port_rss_reta_info(portid_t port_id,
}
for (i = 0; i < nb_entries; i++) {
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
if (!(reta_conf[idx].mask & (1ULL << shift)))
continue;
printf("RSS RETA configuration: hash index=%u, queue=%u\n",
@@ -3427,7 +3427,7 @@ dcb_fwd_config_setup(void)
for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
fwd_lcores[lc_id]->stream_nb = 0;
fwd_lcores[lc_id]->stream_idx = sm_id;
- for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) {
+ for (i = 0; i < RTE_ETH_MAX_VMDQ_POOL; i++) {
/* if the nb_queue is zero, means this tc is
* not enabled on the POOL
*/
@@ -4490,11 +4490,11 @@ vlan_extend_set(portid_t port_id, int on)
vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
if (on) {
- vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD;
- port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
+ vlan_offload |= RTE_ETH_VLAN_EXTEND_OFFLOAD;
+ port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
} else {
- vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD;
- port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
+ vlan_offload &= ~RTE_ETH_VLAN_EXTEND_OFFLOAD;
+ port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
}
diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
@@ -4520,11 +4520,11 @@ rx_vlan_strip_set(portid_t port_id, int on)
vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
if (on) {
- vlan_offload |= ETH_VLAN_STRIP_OFFLOAD;
- port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+ vlan_offload |= RTE_ETH_VLAN_STRIP_OFFLOAD;
+ port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
} else {
- vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD;
- port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+ vlan_offload &= ~RTE_ETH_VLAN_STRIP_OFFLOAD;
+ port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
}
diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
@@ -4565,11 +4565,11 @@ rx_vlan_filter_set(portid_t port_id, int on)
vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
if (on) {
- vlan_offload |= ETH_VLAN_FILTER_OFFLOAD;
- port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+ vlan_offload |= RTE_ETH_VLAN_FILTER_OFFLOAD;
+ port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
} else {
- vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD;
- port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
+ vlan_offload &= ~RTE_ETH_VLAN_FILTER_OFFLOAD;
+ port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
}
diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
@@ -4595,11 +4595,11 @@ rx_vlan_qinq_strip_set(portid_t port_id, int on)
vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
if (on) {
- vlan_offload |= ETH_QINQ_STRIP_OFFLOAD;
- port_rx_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
+ vlan_offload |= RTE_ETH_QINQ_STRIP_OFFLOAD;
+ port_rx_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
} else {
- vlan_offload &= ~ETH_QINQ_STRIP_OFFLOAD;
- port_rx_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
+ vlan_offload &= ~RTE_ETH_QINQ_STRIP_OFFLOAD;
+ port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
}
diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
@@ -4669,7 +4669,7 @@ tx_vlan_set(portid_t port_id, uint16_t vlan_id)
return;
if (ports[port_id].dev_conf.txmode.offloads &
- DEV_TX_OFFLOAD_QINQ_INSERT) {
+ RTE_ETH_TX_OFFLOAD_QINQ_INSERT) {
fprintf(stderr, "Error, as QinQ has been enabled.\n");
return;
}
@@ -4678,7 +4678,7 @@ tx_vlan_set(portid_t port_id, uint16_t vlan_id)
if (ret != 0)
return;
- if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) == 0) {
+ if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) == 0) {
fprintf(stderr,
"Error: vlan insert is not supported by port %d\n",
port_id);
@@ -4686,7 +4686,7 @@ tx_vlan_set(portid_t port_id, uint16_t vlan_id)
}
tx_vlan_reset(port_id);
- ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;
+ ports[port_id].dev_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
ports[port_id].tx_vlan_id = vlan_id;
}
@@ -4705,7 +4705,7 @@ tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer)
if (ret != 0)
return;
- if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) == 0) {
+ if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_QINQ_INSERT) == 0) {
fprintf(stderr,
"Error: qinq insert not supported by port %d\n",
port_id);
@@ -4713,8 +4713,8 @@ tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer)
}
tx_vlan_reset(port_id);
- ports[port_id].dev_conf.txmode.offloads |= (DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_QINQ_INSERT);
+ ports[port_id].dev_conf.txmode.offloads |= (RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+ RTE_ETH_TX_OFFLOAD_QINQ_INSERT);
ports[port_id].tx_vlan_id = vlan_id;
ports[port_id].tx_vlan_id_outer = vlan_id_outer;
}
@@ -4723,8 +4723,8 @@ void
tx_vlan_reset(portid_t port_id)
{
ports[port_id].dev_conf.txmode.offloads &=
- ~(DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_QINQ_INSERT);
+ ~(RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+ RTE_ETH_TX_OFFLOAD_QINQ_INSERT);
ports[port_id].tx_vlan_id = 0;
ports[port_id].tx_vlan_id_outer = 0;
}
@@ -5130,7 +5130,7 @@ set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate)
ret = eth_link_get_nowait_print_err(port_id, &link);
if (ret < 0)
return 1;
- if (link.link_speed != ETH_SPEED_NUM_UNKNOWN &&
+ if (link.link_speed != RTE_ETH_SPEED_NUM_UNKNOWN &&
rate > link.link_speed) {
fprintf(stderr,
"Invalid rate value:%u bigger than link speed: %u\n",
@@ -485,7 +485,7 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
if (info->l4_proto == IPPROTO_TCP && tso_segsz) {
ol_flags |= PKT_TX_IP_CKSUM;
} else {
- if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) {
+ if (tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) {
ol_flags |= PKT_TX_IP_CKSUM;
} else {
ipv4_hdr->hdr_checksum = 0;
@@ -502,7 +502,7 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
udp_hdr = (struct rte_udp_hdr *)((char *)l3_hdr + info->l3_len);
/* do not recalculate udp cksum if it was 0 */
if (udp_hdr->dgram_cksum != 0) {
- if (tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) {
+ if (tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) {
ol_flags |= PKT_TX_UDP_CKSUM;
} else {
udp_hdr->dgram_cksum = 0;
@@ -517,7 +517,7 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
tcp_hdr = (struct rte_tcp_hdr *)((char *)l3_hdr + info->l3_len);
if (tso_segsz)
ol_flags |= PKT_TX_TCP_SEG;
- else if (tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) {
+ else if (tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) {
ol_flags |= PKT_TX_TCP_CKSUM;
} else {
tcp_hdr->cksum = 0;
@@ -532,7 +532,7 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
((char *)l3_hdr + info->l3_len);
/* sctp payload must be a multiple of 4 to be
* offloaded */
- if ((tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) &&
+ if ((tx_offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM) &&
((ipv4_hdr->total_length & 0x3) == 0)) {
ol_flags |= PKT_TX_SCTP_CKSUM;
} else {
@@ -559,7 +559,7 @@ process_outer_cksums(void *outer_l3_hdr, struct testpmd_offload_info *info,
ipv4_hdr->hdr_checksum = 0;
ol_flags |= PKT_TX_OUTER_IPV4;
- if (tx_offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
+ if (tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)
ol_flags |= PKT_TX_OUTER_IP_CKSUM;
else
ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
@@ -576,7 +576,7 @@ process_outer_cksums(void *outer_l3_hdr, struct testpmd_offload_info *info,
ol_flags |= PKT_TX_TCP_SEG;
/* Skip SW outer UDP checksum generation if HW supports it */
- if (tx_offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) {
+ if (tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM) {
if (info->outer_ethertype == _htons(RTE_ETHER_TYPE_IPV4))
udp_hdr->dgram_cksum
= rte_ipv4_phdr_cksum(ipv4_hdr, ol_flags);
@@ -959,9 +959,9 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
if (info.is_tunnel == 1) {
if (info.tunnel_tso_segsz ||
(tx_offloads &
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
+ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
(tx_offloads &
- DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)) {
+ RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)) {
m->outer_l2_len = info.outer_l2_len;
m->outer_l3_len = info.outer_l3_len;
m->l2_len = info.l2_len;
@@ -1022,19 +1022,19 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
rte_be_to_cpu_16(info.outer_ethertype),
info.outer_l3_len);
/* dump tx packet info */
- if ((tx_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_SCTP_CKSUM)) ||
+ if ((tx_offloads & (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)) ||
info.tso_segsz != 0)
printf("tx: m->l2_len=%d m->l3_len=%d "
"m->l4_len=%d\n",
m->l2_len, m->l3_len, m->l4_len);
if (info.is_tunnel == 1) {
if ((tx_offloads &
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
+ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
(tx_offloads &
- DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) ||
+ RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM) ||
(tx_ol_flags & PKT_TX_OUTER_IPV6))
printf("tx: m->outer_l2_len=%d "
"m->outer_l3_len=%d\n",
@@ -99,11 +99,11 @@ pkt_burst_flow_gen(struct fwd_stream *fs)
vlan_tci_outer = ports[fs->tx_port].tx_vlan_id_outer;
tx_offloads = ports[fs->tx_port].dev_conf.txmode.offloads;
- if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
+ if (tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
ol_flags |= PKT_TX_VLAN_PKT;
- if (tx_offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
+ if (tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
ol_flags |= PKT_TX_QINQ_PKT;
- if (tx_offloads & DEV_TX_OFFLOAD_MACSEC_INSERT)
+ if (tx_offloads & RTE_ETH_TX_OFFLOAD_MACSEC_INSERT)
ol_flags |= PKT_TX_MACSEC;
for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
@@ -72,11 +72,11 @@ pkt_burst_mac_forward(struct fwd_stream *fs)
fs->rx_packets += nb_rx;
txp = &ports[fs->tx_port];
tx_offloads = txp->dev_conf.txmode.offloads;
- if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
+ if (tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
ol_flags = PKT_TX_VLAN_PKT;
- if (tx_offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
+ if (tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
ol_flags |= PKT_TX_QINQ_PKT;
- if (tx_offloads & DEV_TX_OFFLOAD_MACSEC_INSERT)
+ if (tx_offloads & RTE_ETH_TX_OFFLOAD_MACSEC_INSERT)
ol_flags |= PKT_TX_MACSEC;
for (i = 0; i < nb_rx; i++) {
if (likely(i < nb_rx - 1))
@@ -10,11 +10,11 @@ ol_flags_init(uint64_t tx_offload)
{
uint64_t ol_flags = 0;
- ol_flags |= (tx_offload & DEV_TX_OFFLOAD_VLAN_INSERT) ?
+ ol_flags |= (tx_offload & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) ?
PKT_TX_VLAN : 0;
- ol_flags |= (tx_offload & DEV_TX_OFFLOAD_QINQ_INSERT) ?
+ ol_flags |= (tx_offload & RTE_ETH_TX_OFFLOAD_QINQ_INSERT) ?
PKT_TX_QINQ : 0;
- ol_flags |= (tx_offload & DEV_TX_OFFLOAD_MACSEC_INSERT) ?
+ ol_flags |= (tx_offload & RTE_ETH_TX_OFFLOAD_MACSEC_INSERT) ?
PKT_TX_MACSEC : 0;
return ol_flags;
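
Usage sketch (txp being a hypothetical struct rte_port pointer, as in the forwarding engines above): compute the flags once per burst rather than per packet:

	uint64_t ol_flags = ol_flags_init(txp->dev_conf.txmode.offloads);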
@@ -547,29 +547,29 @@ parse_xstats_list(const char *in_str, struct rte_eth_xstat_name **xstats,
static int
parse_link_speed(int n)
{
- uint32_t speed = ETH_LINK_SPEED_FIXED;
+ uint32_t speed = RTE_ETH_LINK_SPEED_FIXED;
switch (n) {
case 1000:
- speed |= ETH_LINK_SPEED_1G;
+ speed |= RTE_ETH_LINK_SPEED_1G;
break;
case 10000:
- speed |= ETH_LINK_SPEED_10G;
+ speed |= RTE_ETH_LINK_SPEED_10G;
break;
case 25000:
- speed |= ETH_LINK_SPEED_25G;
+ speed |= RTE_ETH_LINK_SPEED_25G;
break;
case 40000:
- speed |= ETH_LINK_SPEED_40G;
+ speed |= RTE_ETH_LINK_SPEED_40G;
break;
case 50000:
- speed |= ETH_LINK_SPEED_50G;
+ speed |= RTE_ETH_LINK_SPEED_50G;
break;
case 100000:
- speed |= ETH_LINK_SPEED_100G;
+ speed |= RTE_ETH_LINK_SPEED_100G;
break;
case 200000:
- speed |= ETH_LINK_SPEED_200G;
+ speed |= RTE_ETH_LINK_SPEED_200G;
break;
case 100:
case 10:
@@ -1002,13 +1002,13 @@ launch_args_parse(int argc, char** argv)
if (!strcmp(lgopts[opt_idx].name, "pkt-filter-size")) {
if (!strcmp(optarg, "64K"))
fdir_conf.pballoc =
- RTE_FDIR_PBALLOC_64K;
+ RTE_ETH_FDIR_PBALLOC_64K;
else if (!strcmp(optarg, "128K"))
fdir_conf.pballoc =
- RTE_FDIR_PBALLOC_128K;
+ RTE_ETH_FDIR_PBALLOC_128K;
else if (!strcmp(optarg, "256K"))
fdir_conf.pballoc =
- RTE_FDIR_PBALLOC_256K;
+ RTE_ETH_FDIR_PBALLOC_256K;
else
rte_exit(EXIT_FAILURE, "pkt-filter-size %s invalid -"
" must be: 64K or 128K or 256K\n",
@@ -1050,34 +1050,34 @@ launch_args_parse(int argc, char** argv)
}
#endif
if (!strcmp(lgopts[opt_idx].name, "disable-crc-strip"))
- rx_offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
+ rx_offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
if (!strcmp(lgopts[opt_idx].name, "enable-lro"))
- rx_offloads |= DEV_RX_OFFLOAD_TCP_LRO;
+ rx_offloads |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
if (!strcmp(lgopts[opt_idx].name, "enable-scatter"))
- rx_offloads |= DEV_RX_OFFLOAD_SCATTER;
+ rx_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
if (!strcmp(lgopts[opt_idx].name, "enable-rx-cksum"))
- rx_offloads |= DEV_RX_OFFLOAD_CHECKSUM;
+ rx_offloads |= RTE_ETH_RX_OFFLOAD_CHECKSUM;
if (!strcmp(lgopts[opt_idx].name,
"enable-rx-timestamp"))
- rx_offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
+ rx_offloads |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
if (!strcmp(lgopts[opt_idx].name, "enable-hw-vlan"))
- rx_offloads |= DEV_RX_OFFLOAD_VLAN;
+ rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN;
if (!strcmp(lgopts[opt_idx].name,
"enable-hw-vlan-filter"))
- rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+ rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
if (!strcmp(lgopts[opt_idx].name,
"enable-hw-vlan-strip"))
- rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+ rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
if (!strcmp(lgopts[opt_idx].name,
"enable-hw-vlan-extend"))
- rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
+ rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
if (!strcmp(lgopts[opt_idx].name,
"enable-hw-qinq-strip"))
- rx_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
+ rx_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
if (!strcmp(lgopts[opt_idx].name, "enable-drop-en"))
rx_drop_en = 1;
@@ -1099,13 +1099,13 @@ launch_args_parse(int argc, char** argv)
if (!strcmp(lgopts[opt_idx].name, "forward-mode"))
set_pkt_forwarding_mode(optarg);
if (!strcmp(lgopts[opt_idx].name, "rss-ip"))
- rss_hf = ETH_RSS_IP;
+ rss_hf = RTE_ETH_RSS_IP;
if (!strcmp(lgopts[opt_idx].name, "rss-udp"))
- rss_hf = ETH_RSS_UDP;
+ rss_hf = RTE_ETH_RSS_UDP;
if (!strcmp(lgopts[opt_idx].name, "rss-level-inner"))
- rss_hf |= ETH_RSS_LEVEL_INNERMOST;
+ rss_hf |= RTE_ETH_RSS_LEVEL_INNERMOST;
if (!strcmp(lgopts[opt_idx].name, "rss-level-outer"))
- rss_hf |= ETH_RSS_LEVEL_OUTERMOST;
+ rss_hf |= RTE_ETH_RSS_LEVEL_OUTERMOST;
if (!strcmp(lgopts[opt_idx].name, "rxq")) {
n = atoi(optarg);
if (n >= 0 && check_nb_rxq((queueid_t)n) == 0)
@@ -1495,12 +1495,12 @@ launch_args_parse(int argc, char** argv)
if (!strcmp(lgopts[opt_idx].name, "rx-mq-mode")) {
char *end = NULL;
n = strtoul(optarg, &end, 16);
- if (n >= 0 && n <= ETH_MQ_RX_VMDQ_DCB_RSS)
+ if (n >= 0 && n <= RTE_ETH_MQ_RX_VMDQ_DCB_RSS)
rx_mq_mode = (enum rte_eth_rx_mq_mode)n;
else
rte_exit(EXIT_FAILURE,
"rx-mq-mode must be >= 0 and <= %d\n",
- ETH_MQ_RX_VMDQ_DCB_RSS);
+ RTE_ETH_MQ_RX_VMDQ_DCB_RSS);
}
if (!strcmp(lgopts[opt_idx].name, "record-core-cycles"))
record_core_cycles = 1;
@@ -349,7 +349,7 @@ uint64_t noisy_lkup_num_reads_writes;
/*
* Receive Side Scaling (RSS) configuration.
*/
-uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
+uint64_t rss_hf = RTE_ETH_RSS_IP; /* RSS IP by default. */
/*
* Port topology configuration
@@ -460,12 +460,12 @@ lcoreid_t latencystats_lcore_id = -1;
struct rte_eth_rxmode rx_mode;
struct rte_eth_txmode tx_mode = {
- .offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
+ .offloads = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE,
};
-struct rte_fdir_conf fdir_conf = {
+struct rte_eth_fdir_conf fdir_conf = {
.mode = RTE_FDIR_MODE_NONE,
- .pballoc = RTE_FDIR_PBALLOC_64K,
+ .pballoc = RTE_ETH_FDIR_PBALLOC_64K,
.status = RTE_FDIR_REPORT_STATUS,
.mask = {
.vlan_tci_mask = 0xFFEF,
@@ -524,7 +524,7 @@ uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
/*
* hexadecimal bitmask of RX mq mode can be enabled.
*/
-enum rte_eth_rx_mq_mode rx_mq_mode = ETH_MQ_RX_VMDQ_DCB_RSS;
+enum rte_eth_rx_mq_mode rx_mq_mode = RTE_ETH_MQ_RX_VMDQ_DCB_RSS;
/*
* Used to set forced link speed
@@ -1578,9 +1578,9 @@ init_config_port_offloads(portid_t pid, uint32_t socket_id)
if (ret != 0)
rte_exit(EXIT_FAILURE, "rte_eth_dev_info_get() failed\n");
- if (!(port->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
+ if (!(port->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
port->dev_conf.txmode.offloads &=
- ~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ ~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
/* Apply Rx offloads configuration */
for (i = 0; i < port->dev_info.max_rx_queues; i++)
@@ -1717,8 +1717,8 @@ init_config(void)
init_port_config();
- gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
- DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
+ gso_types = RTE_ETH_TX_OFFLOAD_TCP_TSO | RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | RTE_ETH_TX_OFFLOAD_UDP_TSO;
/*
* Records which Mbuf pool to use by each logical core, if needed.
*/
@@ -3466,7 +3466,7 @@ check_all_ports_link_status(uint32_t port_mask)
continue;
}
/* clear all_ports_up flag if any link down */
- if (link.link_status == ETH_LINK_DOWN) {
+ if (link.link_status == RTE_ETH_LINK_DOWN) {
all_ports_up = 0;
break;
}
@@ -3769,17 +3769,17 @@ init_port_config(void)
if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0) {
port->dev_conf.rxmode.mq_mode =
(enum rte_eth_rx_mq_mode)
- (rx_mq_mode & ETH_MQ_RX_RSS);
+ (rx_mq_mode & RTE_ETH_MQ_RX_RSS);
} else {
- port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
+ port->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_NONE;
port->dev_conf.rxmode.offloads &=
- ~DEV_RX_OFFLOAD_RSS_HASH;
+ ~RTE_ETH_RX_OFFLOAD_RSS_HASH;
for (i = 0;
i < port->dev_info.nb_rx_queues;
i++)
port->rx_conf[i].offloads &=
- ~DEV_RX_OFFLOAD_RSS_HASH;
+ ~RTE_ETH_RX_OFFLOAD_RSS_HASH;
}
}
@@ -3867,9 +3867,9 @@ get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
vmdq_rx_conf->enable_default_pool = 0;
vmdq_rx_conf->default_pool = 0;
vmdq_rx_conf->nb_queue_pools =
- (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
+ (num_tcs == RTE_ETH_4_TCS ? RTE_ETH_32_POOLS : RTE_ETH_16_POOLS);
vmdq_tx_conf->nb_queue_pools =
- (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
+ (num_tcs == RTE_ETH_4_TCS ? RTE_ETH_32_POOLS : RTE_ETH_16_POOLS);
vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
@@ -3877,7 +3877,7 @@ get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
vmdq_rx_conf->pool_map[i].pools =
1 << (i % vmdq_rx_conf->nb_queue_pools);
}
- for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+ for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
}
@@ -3885,8 +3885,8 @@ get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
/* set DCB mode of RX and TX of multiple queues */
eth_conf->rxmode.mq_mode =
(enum rte_eth_rx_mq_mode)
- (rx_mq_mode & ETH_MQ_RX_VMDQ_DCB);
- eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
+ (rx_mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB);
+ eth_conf->txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB;
} else {
struct rte_eth_dcb_rx_conf *rx_conf =
&eth_conf->rx_adv_conf.dcb_rx_conf;
@@ -3902,23 +3902,23 @@ get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
rx_conf->nb_tcs = num_tcs;
tx_conf->nb_tcs = num_tcs;
- for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+ for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
rx_conf->dcb_tc[i] = i % num_tcs;
tx_conf->dcb_tc[i] = i % num_tcs;
}
eth_conf->rxmode.mq_mode =
(enum rte_eth_rx_mq_mode)
- (rx_mq_mode & ETH_MQ_RX_DCB_RSS);
+ (rx_mq_mode & RTE_ETH_MQ_RX_DCB_RSS);
eth_conf->rx_adv_conf.rss_conf = rss_conf;
- eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
+ eth_conf->txmode.mq_mode = RTE_ETH_MQ_TX_DCB;
}
if (pfc_en)
eth_conf->dcb_capability_en =
- ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
+ RTE_ETH_DCB_PG_SUPPORT | RTE_ETH_DCB_PFC_SUPPORT;
else
- eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
+ eth_conf->dcb_capability_en = RTE_ETH_DCB_PG_SUPPORT;
return 0;
}
@@ -3947,7 +3947,7 @@ init_port_dcb_config(portid_t pid,
retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
if (retval < 0)
return retval;
- port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+ port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
/* re-configure the device . */
retval = rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
@@ -3997,7 +3997,7 @@ init_port_dcb_config(portid_t pid,
rxtx_port_config(pid);
/* VLAN filter */
- rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+ rte_port->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
for (i = 0; i < RTE_DIM(vlan_tags); i++)
rx_vft_set(pid, vlan_tags[i], 1);
@@ -493,7 +493,7 @@ extern lcoreid_t bitrate_lcore_id;
extern uint8_t bitrate_enabled;
#endif
-extern struct rte_fdir_conf fdir_conf;
+extern struct rte_eth_fdir_conf fdir_conf;
extern uint32_t max_rx_pkt_len;
@@ -354,11 +354,11 @@ pkt_burst_transmit(struct fwd_stream *fs)
tx_offloads = txp->dev_conf.txmode.offloads;
vlan_tci = txp->tx_vlan_id;
vlan_tci_outer = txp->tx_vlan_id_outer;
- if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
+ if (tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
ol_flags = PKT_TX_VLAN_PKT;
- if (tx_offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
+ if (tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
ol_flags |= PKT_TX_QINQ_PKT;
- if (tx_offloads & DEV_TX_OFFLOAD_MACSEC_INSERT)
+ if (tx_offloads & RTE_ETH_TX_OFFLOAD_MACSEC_INSERT)
ol_flags |= PKT_TX_MACSEC;
/*
@@ -14,10 +14,10 @@ test_link_status_up_default(void)
{
int ret = 0;
struct rte_eth_link link_status = {
- .link_speed = ETH_SPEED_NUM_2_5G,
- .link_status = ETH_LINK_UP,
- .link_autoneg = ETH_LINK_AUTONEG,
- .link_duplex = ETH_LINK_FULL_DUPLEX
+ .link_speed = RTE_ETH_SPEED_NUM_2_5G,
+ .link_status = RTE_ETH_LINK_UP,
+ .link_autoneg = RTE_ETH_LINK_AUTONEG,
+ .link_duplex = RTE_ETH_LINK_FULL_DUPLEX
};
char text[RTE_ETH_LINK_MAX_STR_LEN];
@@ -27,9 +27,9 @@ test_link_status_up_default(void)
TEST_ASSERT_BUFFERS_ARE_EQUAL("Link up at 2.5 Gbps FDX Autoneg",
text, strlen(text), "Invalid default link status string");
- link_status.link_duplex = ETH_LINK_HALF_DUPLEX;
- link_status.link_autoneg = ETH_LINK_FIXED;
- link_status.link_speed = ETH_SPEED_NUM_10M,
+ link_status.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+ link_status.link_autoneg = RTE_ETH_LINK_FIXED;
+ link_status.link_speed = RTE_ETH_SPEED_NUM_10M;
ret = rte_eth_link_to_str(text, sizeof(text), &link_status);
printf("Default link up #2: %s\n", text);
RTE_TEST_ASSERT(ret > 0, "Failed to format default string\n");
@@ -37,7 +37,7 @@ test_link_status_up_default(void)
text, strlen(text), "Invalid default link status "
"string with HDX");
- link_status.link_speed = ETH_SPEED_NUM_UNKNOWN;
+ link_status.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
ret = rte_eth_link_to_str(text, sizeof(text), &link_status);
printf("Default link up #3: %s\n", text);
RTE_TEST_ASSERT(ret > 0, "Failed to format default string\n");
@@ -45,7 +45,7 @@ test_link_status_up_default(void)
text, strlen(text), "Invalid default link status "
"string with HDX");
- link_status.link_speed = ETH_SPEED_NUM_NONE;
+ link_status.link_speed = RTE_ETH_SPEED_NUM_NONE;
ret = rte_eth_link_to_str(text, sizeof(text), &link_status);
printf("Default link up #3: %s\n", text);
RTE_TEST_ASSERT(ret > 0, "Failed to format default string\n");
@@ -54,9 +54,9 @@ test_link_status_up_default(void)
"string with HDX");
/* test max str len */
- link_status.link_speed = ETH_SPEED_NUM_200G;
- link_status.link_duplex = ETH_LINK_HALF_DUPLEX;
- link_status.link_autoneg = ETH_LINK_AUTONEG;
+ link_status.link_speed = RTE_ETH_SPEED_NUM_200G;
+ link_status.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+ link_status.link_autoneg = RTE_ETH_LINK_AUTONEG;
ret = rte_eth_link_to_str(text, sizeof(text), &link_status);
printf("Default link up #4:len = %d, %s\n", ret, text);
RTE_TEST_ASSERT(ret < RTE_ETH_LINK_MAX_STR_LEN,
@@ -69,10 +69,10 @@ test_link_status_down_default(void)
{
int ret = 0;
struct rte_eth_link link_status = {
- .link_speed = ETH_SPEED_NUM_2_5G,
- .link_status = ETH_LINK_DOWN,
- .link_autoneg = ETH_LINK_AUTONEG,
- .link_duplex = ETH_LINK_FULL_DUPLEX
+ .link_speed = RTE_ETH_SPEED_NUM_2_5G,
+ .link_status = RTE_ETH_LINK_DOWN,
+ .link_autoneg = RTE_ETH_LINK_AUTONEG,
+ .link_duplex = RTE_ETH_LINK_FULL_DUPLEX
};
char text[RTE_ETH_LINK_MAX_STR_LEN];
@@ -90,9 +90,9 @@ test_link_status_invalid(void)
int ret = 0;
struct rte_eth_link link_status = {
.link_speed = 55555,
- .link_status = ETH_LINK_UP,
- .link_autoneg = ETH_LINK_AUTONEG,
- .link_duplex = ETH_LINK_FULL_DUPLEX
+ .link_status = RTE_ETH_LINK_UP,
+ .link_autoneg = RTE_ETH_LINK_AUTONEG,
+ .link_duplex = RTE_ETH_LINK_FULL_DUPLEX
};
char text[RTE_ETH_LINK_MAX_STR_LEN];
@@ -116,21 +116,21 @@ test_link_speed_all_values(void)
const char *value;
uint32_t link_speed;
} speed_str_map[] = {
- { "None", ETH_SPEED_NUM_NONE },
- { "10 Mbps", ETH_SPEED_NUM_10M },
- { "100 Mbps", ETH_SPEED_NUM_100M },
- { "1 Gbps", ETH_SPEED_NUM_1G },
- { "2.5 Gbps", ETH_SPEED_NUM_2_5G },
- { "5 Gbps", ETH_SPEED_NUM_5G },
- { "10 Gbps", ETH_SPEED_NUM_10G },
- { "20 Gbps", ETH_SPEED_NUM_20G },
- { "25 Gbps", ETH_SPEED_NUM_25G },
- { "40 Gbps", ETH_SPEED_NUM_40G },
- { "50 Gbps", ETH_SPEED_NUM_50G },
- { "56 Gbps", ETH_SPEED_NUM_56G },
- { "100 Gbps", ETH_SPEED_NUM_100G },
- { "200 Gbps", ETH_SPEED_NUM_200G },
- { "Unknown", ETH_SPEED_NUM_UNKNOWN },
+ { "None", RTE_ETH_SPEED_NUM_NONE },
+ { "10 Mbps", RTE_ETH_SPEED_NUM_10M },
+ { "100 Mbps", RTE_ETH_SPEED_NUM_100M },
+ { "1 Gbps", RTE_ETH_SPEED_NUM_1G },
+ { "2.5 Gbps", RTE_ETH_SPEED_NUM_2_5G },
+ { "5 Gbps", RTE_ETH_SPEED_NUM_5G },
+ { "10 Gbps", RTE_ETH_SPEED_NUM_10G },
+ { "20 Gbps", RTE_ETH_SPEED_NUM_20G },
+ { "25 Gbps", RTE_ETH_SPEED_NUM_25G },
+ { "40 Gbps", RTE_ETH_SPEED_NUM_40G },
+ { "50 Gbps", RTE_ETH_SPEED_NUM_50G },
+ { "56 Gbps", RTE_ETH_SPEED_NUM_56G },
+ { "100 Gbps", RTE_ETH_SPEED_NUM_100G },
+ { "200 Gbps", RTE_ETH_SPEED_NUM_200G },
+ { "Unknown", RTE_ETH_SPEED_NUM_UNKNOWN },
{ "Invalid", 50505 }
};
@@ -103,7 +103,7 @@ port_init_rx_intr(uint16_t port, struct rte_mempool *mp)
{
static const struct rte_eth_conf port_conf_default = {
.rxmode = {
- .mq_mode = ETH_MQ_RX_NONE,
+ .mq_mode = RTE_ETH_MQ_RX_NONE,
},
.intr_conf = {
.rxq = 1,
@@ -118,7 +118,7 @@ port_init(uint16_t port, struct rte_mempool *mp)
{
static const struct rte_eth_conf port_conf_default = {
.rxmode = {
- .mq_mode = ETH_MQ_RX_NONE,
+ .mq_mode = RTE_ETH_MQ_RX_NONE,
},
};
@@ -74,7 +74,7 @@ static const struct rte_eth_txconf tx_conf = {
static const struct rte_eth_conf port_conf = {
.txmode = {
- .mq_mode = ETH_DCB_NONE,
+ .mq_mode = RTE_ETH_MQ_TX_NONE,
},
};
@@ -134,11 +134,11 @@ static uint16_t vlan_id = 0x100;
static struct rte_eth_conf default_pmd_conf = {
.rxmode = {
- .mq_mode = ETH_MQ_RX_NONE,
+ .mq_mode = RTE_ETH_MQ_RX_NONE,
.split_hdr_size = 0,
},
.txmode = {
- .mq_mode = ETH_MQ_TX_NONE,
+ .mq_mode = RTE_ETH_MQ_TX_NONE,
},
.lpbk_mode = 0,
};
@@ -107,11 +107,11 @@ static struct link_bonding_unittest_params test_params = {
static struct rte_eth_conf default_pmd_conf = {
.rxmode = {
- .mq_mode = ETH_MQ_RX_NONE,
+ .mq_mode = RTE_ETH_MQ_RX_NONE,
.split_hdr_size = 0,
},
.txmode = {
- .mq_mode = ETH_MQ_TX_NONE,
+ .mq_mode = RTE_ETH_MQ_TX_NONE,
},
.lpbk_mode = 0,
};
@@ -52,7 +52,7 @@ struct slave_conf {
struct rte_eth_rss_conf rss_conf;
uint8_t rss_key[40];
- struct rte_eth_rss_reta_entry64 reta_conf[512 / RTE_RETA_GROUP_SIZE];
+ struct rte_eth_rss_reta_entry64 reta_conf[512 / RTE_ETH_RETA_GROUP_SIZE];
uint8_t is_slave;
struct rte_ring *rxtx_queue[RXTX_QUEUE_COUNT];
@@ -61,7 +61,7 @@ struct slave_conf {
struct link_bonding_rssconf_unittest_params {
uint8_t bond_port_id;
struct rte_eth_dev_info bond_dev_info;
- struct rte_eth_rss_reta_entry64 bond_reta_conf[512 / RTE_RETA_GROUP_SIZE];
+ struct rte_eth_rss_reta_entry64 bond_reta_conf[512 / RTE_ETH_RETA_GROUP_SIZE];
struct slave_conf slave_ports[SLAVE_COUNT];
struct rte_mempool *mbuf_pool;
@@ -80,27 +80,27 @@ static struct link_bonding_rssconf_unittest_params test_params = {
*/
static struct rte_eth_conf default_pmd_conf = {
.rxmode = {
- .mq_mode = ETH_MQ_RX_NONE,
+ .mq_mode = RTE_ETH_MQ_RX_NONE,
.split_hdr_size = 0,
},
.txmode = {
- .mq_mode = ETH_MQ_TX_NONE,
+ .mq_mode = RTE_ETH_MQ_TX_NONE,
},
.lpbk_mode = 0,
};
static struct rte_eth_conf rss_pmd_conf = {
.rxmode = {
- .mq_mode = ETH_MQ_RX_RSS,
+ .mq_mode = RTE_ETH_MQ_RX_RSS,
.split_hdr_size = 0,
},
.txmode = {
- .mq_mode = ETH_MQ_TX_NONE,
+ .mq_mode = RTE_ETH_MQ_TX_NONE,
},
.rx_adv_conf = {
.rss_conf = {
.rss_key = NULL,
- .rss_hf = ETH_RSS_IPV6,
+ .rss_hf = RTE_ETH_RSS_IPV6,
},
},
.lpbk_mode = 0,
@@ -207,13 +207,13 @@ bond_slaves(void)
static int
reta_set(uint16_t port_id, uint8_t value, int reta_size)
{
- struct rte_eth_rss_reta_entry64 reta_conf[512/RTE_RETA_GROUP_SIZE];
+ struct rte_eth_rss_reta_entry64 reta_conf[512/RTE_ETH_RETA_GROUP_SIZE];
int i, j;
- for (i = 0; i < reta_size / RTE_RETA_GROUP_SIZE; i++) {
+ for (i = 0; i < reta_size / RTE_ETH_RETA_GROUP_SIZE; i++) {
/* select all fields to set */
reta_conf[i].mask = ~0LL;
- for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+ for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
reta_conf[i].reta[j] = value;
}
@@ -232,8 +232,8 @@ reta_check_synced(struct slave_conf *port)
for (i = 0; i < test_params.bond_dev_info.reta_size;
i++) {
- int index = i / RTE_RETA_GROUP_SIZE;
- int shift = i % RTE_RETA_GROUP_SIZE;
+ int index = i / RTE_ETH_RETA_GROUP_SIZE;
+ int shift = i % RTE_ETH_RETA_GROUP_SIZE;
if (port->reta_conf[index].reta[shift] !=
test_params.bond_reta_conf[index].reta[shift])
@@ -251,7 +251,7 @@ static int
bond_reta_fetch(void) {
unsigned j;
- for (j = 0; j < test_params.bond_dev_info.reta_size / RTE_RETA_GROUP_SIZE;
+ for (j = 0; j < test_params.bond_dev_info.reta_size / RTE_ETH_RETA_GROUP_SIZE;
j++)
test_params.bond_reta_conf[j].mask = ~0LL;
@@ -268,7 +268,7 @@ static int
slave_reta_fetch(struct slave_conf *port) {
unsigned j;
- for (j = 0; j < port->dev_info.reta_size / RTE_RETA_GROUP_SIZE; j++)
+ for (j = 0; j < port->dev_info.reta_size / RTE_ETH_RETA_GROUP_SIZE; j++)
port->reta_conf[j].mask = ~0LL;
TEST_ASSERT_SUCCESS(rte_eth_dev_rss_reta_query(port->port_id,
@@ -62,11 +62,11 @@ static struct rte_ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];
static struct rte_eth_conf port_conf = {
.rxmode = {
- .mq_mode = ETH_MQ_RX_NONE,
+ .mq_mode = RTE_ETH_MQ_RX_NONE,
.split_hdr_size = 0,
},
.txmode = {
- .mq_mode = ETH_MQ_TX_NONE,
+ .mq_mode = RTE_ETH_MQ_TX_NONE,
},
.lpbk_mode = 1, /* enable loopback */
};
@@ -155,7 +155,7 @@ check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
continue;
}
/* clear all_ports_up flag if any link down */
- if (link.link_status == ETH_LINK_DOWN) {
+ if (link.link_status == RTE_ETH_LINK_DOWN) {
all_ports_up = 0;
break;
}
@@ -822,7 +822,7 @@ test_set_rxtx_conf(cmdline_fixed_string_t mode)
/* bulk alloc rx, full-featured tx */
tx_conf.tx_rs_thresh = 32;
tx_conf.tx_free_thresh = 32;
- port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_CHECKSUM;
+ port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_CHECKSUM;
return 0;
} else if (!strcmp(mode, "hybrid")) {
/* bulk alloc rx, vector tx
@@ -831,13 +831,13 @@ test_set_rxtx_conf(cmdline_fixed_string_t mode)
*/
tx_conf.tx_rs_thresh = 32;
tx_conf.tx_free_thresh = 32;
- port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_CHECKSUM;
+ port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_CHECKSUM;
return 0;
} else if (!strcmp(mode, "full")) {
/* full feature rx,tx pair */
tx_conf.tx_rs_thresh = 32;
tx_conf.tx_free_thresh = 32;
- port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_SCATTER;
+ port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
return 0;
}
@@ -53,7 +53,7 @@ static int virtual_ethdev_stop(struct rte_eth_dev *eth_dev __rte_unused)
void *pkt = NULL;
struct virtual_ethdev_private *prv = eth_dev->data->dev_private;
- eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+ eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
eth_dev->data->dev_started = 0;
while (rte_ring_dequeue(prv->rx_queue, &pkt) != -ENOENT)
rte_pktmbuf_free(pkt);
@@ -168,7 +168,7 @@ virtual_ethdev_link_update_success(struct rte_eth_dev *bonded_eth_dev,
int wait_to_complete __rte_unused)
{
if (!bonded_eth_dev->data->dev_started)
- bonded_eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+ bonded_eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
return 0;
}
@@ -562,9 +562,9 @@ virtual_ethdev_create(const char *name, struct rte_ether_addr *mac_addr,
eth_dev->data->nb_rx_queues = (uint16_t)1;
eth_dev->data->nb_tx_queues = (uint16_t)1;
- eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
- eth_dev->data->dev_link.link_speed = ETH_SPEED_NUM_10G;
- eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
+ eth_dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_10G;
+ eth_dev->data->dev_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
eth_dev->data->mac_addrs = rte_zmalloc(name, RTE_ETHER_ADDR_LEN, 0);
if (eth_dev->data->mac_addrs == NULL)
@@ -42,7 +42,7 @@ Features of the OCTEON cnxk SSO PMD are:
- HW managed packets enqueued from ethdev to eventdev exposed through event eth
RX adapter.
- N:1 ethernet device Rx queue to Event queue mapping.
-- Lockfree Tx from event eth Tx adapter using ``DEV_TX_OFFLOAD_MT_LOCKFREE``
+- Lockfree Tx from event eth Tx adapter using ``RTE_ETH_TX_OFFLOAD_MT_LOCKFREE``
capability while maintaining receive packet order.
- Full Rx/Tx offload support defined through ethdev queue configuration.
- HW managed event vectorization on CN10K for packets enqueued from ethdev to
@@ -35,7 +35,7 @@ Features of the OCTEON TX2 SSO PMD are:
- HW managed packets enqueued from ethdev to eventdev exposed through event eth
RX adapter.
- N:1 ethernet device Rx queue to Event queue mapping.
-- Lockfree Tx from event eth Tx adapter using ``DEV_TX_OFFLOAD_MT_LOCKFREE``
+- Lockfree Tx from event eth Tx adapter using ``RTE_ETH_TX_OFFLOAD_MT_LOCKFREE``
capability while maintaining receive packet order.
- Full Rx/Tx offload support defined through ethdev queue config.
@@ -70,5 +70,5 @@ Features and Limitations
------------------------
The PMD will re-insert the VLAN tag transparently to the packet if the kernel
-strips it, as long as the ``DEV_RX_OFFLOAD_VLAN_STRIP`` is not enabled by the
+strips it, as long as ``RTE_ETH_RX_OFFLOAD_VLAN_STRIP`` is not enabled by the
application.
@@ -877,21 +877,21 @@ processing. This improved performance is derived from a number of optimizations:
* TX: only the following reduced set of transmit offloads is supported in
vector mode::
- DEV_TX_OFFLOAD_MBUF_FAST_FREE
+ RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE
* RX: only the following reduced set of receive offloads is supported in
vector mode (note that jumbo MTU is allowed only when the MTU setting
- does not require `DEV_RX_OFFLOAD_SCATTER` to be enabled)::
-
- DEV_RX_OFFLOAD_VLAN_STRIP
- DEV_RX_OFFLOAD_KEEP_CRC
- DEV_RX_OFFLOAD_IPV4_CKSUM
- DEV_RX_OFFLOAD_UDP_CKSUM
- DEV_RX_OFFLOAD_TCP_CKSUM
- DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM
- DEV_RX_OFFLOAD_OUTER_UDP_CKSUM
- DEV_RX_OFFLOAD_RSS_HASH
- DEV_RX_OFFLOAD_VLAN_FILTER
+ does not require `RTE_ETH_RX_OFFLOAD_SCATTER` to be enabled)::
+
+ RTE_ETH_RX_OFFLOAD_VLAN_STRIP
+ RTE_ETH_RX_OFFLOAD_KEEP_CRC
+ RTE_ETH_RX_OFFLOAD_IPV4_CKSUM
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM
+ RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM
+ RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM
+ RTE_ETH_RX_OFFLOAD_RSS_HASH
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER
The BNXT Vector PMD is enabled in DPDK builds by default. The decision to enable
vector processing is made at run-time when the port is started; if no transmit
@@ -432,7 +432,7 @@ Limitations
.. code-block:: console
vlan_offload = rte_eth_dev_get_vlan_offload(port);
- vlan_offload |= ETH_VLAN_STRIP_OFFLOAD;
+ vlan_offload |= RTE_ETH_VLAN_STRIP_OFFLOAD;
rte_eth_dev_set_vlan_offload(port, vlan_offload);
Another alternative is to modify the adapter's ingress VLAN rewrite mode so that
@@ -30,7 +30,7 @@ Speed capabilities
Supports getting the speed capabilities that the current device is capable of.
-* **[provides] rte_eth_dev_info**: ``speed_capa:ETH_LINK_SPEED_*``.
+* **[provides] rte_eth_dev_info**: ``speed_capa:RTE_ETH_LINK_SPEED_*``.
* **[related] API**: ``rte_eth_dev_info_get()``.
@@ -101,11 +101,11 @@ Supports Rx interrupts.
Lock-free Tx queue
------------------
-If a PMD advertises DEV_TX_OFFLOAD_MT_LOCKFREE capable, multiple threads can
+If a PMD advertises the RTE_ETH_TX_OFFLOAD_MT_LOCKFREE capability, multiple threads can
invoke rte_eth_tx_burst() concurrently on the same Tx queue without a SW lock.
-* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_MT_LOCKFREE``.
-* **[provides] rte_eth_dev_info**: ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_MT_LOCKFREE``.
+* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_MT_LOCKFREE``.
+* **[provides] rte_eth_dev_info**: ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_MT_LOCKFREE``.
* **[related] API**: ``rte_eth_tx_burst()``.
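A minimal sketch of how an application might exploit this capability (the
helper name ``shared_txq_send``, the single queue 0 and the spinlock fallback
are illustrative assumptions, not part of the API)::

    #include <rte_ethdev.h>
    #include <rte_spinlock.h>

    static rte_spinlock_t tx_lock = RTE_SPINLOCK_INITIALIZER;

    /* Transmit on a queue shared between lcores; skip the application
     * lock only when the PMD advertises MT_LOCKFREE. */
    static uint16_t
    shared_txq_send(uint16_t port_id, uint64_t tx_offload_capa,
                    struct rte_mbuf **pkts, uint16_t nb_pkts)
    {
            uint16_t sent;

            if (tx_offload_capa & RTE_ETH_TX_OFFLOAD_MT_LOCKFREE)
                    return rte_eth_tx_burst(port_id, 0, pkts, nb_pkts);

            rte_spinlock_lock(&tx_lock);
            sent = rte_eth_tx_burst(port_id, 0, pkts, nb_pkts);
            rte_spinlock_unlock(&tx_lock);
            return sent;
    }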
@@ -117,8 +117,8 @@ Fast mbuf free
Supports optimization for fast release of mbufs following successful Tx.
Requires that per queue, all mbufs come from the same mempool and have refcnt = 1.
-* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_MBUF_FAST_FREE``.
-* **[provides] rte_eth_dev_info**: ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_MBUF_FAST_FREE``.
+* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE``.
+* **[provides] rte_eth_dev_info**: ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE``.
.. _nic_features_free_tx_mbuf_on_demand:
@@ -177,7 +177,7 @@ Scattered Rx
Supports receiving segmented mbufs.
-* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_SCATTER``.
+* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_SCATTER``.
* **[implements] datapath**: ``Scattered Rx function``.
* **[implements] rte_eth_dev_data**: ``scattered_rx``.
* **[provides] eth_dev_ops**: ``rxq_info_get:scattered_rx``.
@@ -205,12 +205,12 @@ LRO
Supports Large Receive Offload.
-* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_TCP_LRO``.
+* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_TCP_LRO``.
``dev_conf.rxmode.max_lro_pkt_size``.
* **[implements] datapath**: ``LRO functionality``.
* **[implements] rte_eth_dev_data**: ``lro``.
* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_LRO``, ``mbuf.tso_segsz``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_TCP_LRO``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_TCP_LRO``.
* **[provides] rte_eth_dev_info**: ``max_lro_pkt_size``.
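A possible configuration sketch, assuming ``dev_info`` and ``port_conf`` come
from the usual ``rte_eth_dev_info_get()``/``rte_eth_dev_configure()`` flow::

    if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
            port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
            /* cap aggregated packets at what the device reports */
            port_conf.rxmode.max_lro_pkt_size = dev_info.max_lro_pkt_size;
    }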
@@ -221,12 +221,12 @@ TSO
Supports TCP Segmentation Offloading.
-* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_TCP_TSO``.
+* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_TCP_TSO``.
* **[uses] rte_eth_desc_lim**: ``nb_seg_max``, ``nb_mtu_seg_max``.
* **[uses] mbuf**: ``mbuf.ol_flags:`` ``PKT_TX_TCP_SEG``, ``PKT_TX_IPV4``, ``PKT_TX_IPV6``, ``PKT_TX_IP_CKSUM``.
* **[uses] mbuf**: ``mbuf.tso_segsz``, ``mbuf.l2_len``, ``mbuf.l3_len``, ``mbuf.l4_len``.
* **[implements] datapath**: ``TSO functionality``.
-* **[provides] rte_eth_dev_info**: ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_TCP_TSO,DEV_TX_OFFLOAD_UDP_TSO``.
+* **[provides] rte_eth_dev_info**: ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_TCP_TSO,RTE_ETH_TX_OFFLOAD_UDP_TSO``.
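A sketch of preparing an mbuf ``mb`` for TSO (the header lengths assume an
untagged IPv4/TCP packet; real code should derive them from the parsed
headers)::

    mb->l2_len = sizeof(struct rte_ether_hdr);
    mb->l3_len = sizeof(struct rte_ipv4_hdr);
    mb->l4_len = sizeof(struct rte_tcp_hdr);
    mb->tso_segsz = 1448; /* payload bytes per output segment */
    mb->ol_flags |= PKT_TX_TCP_SEG | PKT_TX_IPV4 | PKT_TX_IP_CKSUM;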
.. _nic_features_promiscuous_mode:
@@ -287,9 +287,9 @@ RSS hash
Supports RSS hashing on RX.
-* **[uses] user config**: ``dev_conf.rxmode.mq_mode`` = ``ETH_MQ_RX_RSS_FLAG``.
+* **[uses] user config**: ``dev_conf.rxmode.mq_mode`` = ``RTE_ETH_MQ_RX_RSS_FLAG``.
* **[uses] user config**: ``dev_conf.rx_adv_conf.rss_conf``.
-* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_RSS_HASH``.
+* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_RSS_HASH``.
* **[provides] rte_eth_dev_info**: ``flow_type_rss_offloads``.
* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_RSS_HASH``, ``mbuf.rss``.
@@ -302,7 +302,7 @@ Inner RSS
Supports RX RSS hashing on Inner headers.
* **[uses] rte_flow_action_rss**: ``level``.
-* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_RSS_HASH``.
+* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_RSS_HASH``.
* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_RSS_HASH``, ``mbuf.rss``.
@@ -339,7 +339,7 @@ VMDq
Supports Virtual Machine Device Queues (VMDq).
-* **[uses] user config**: ``dev_conf.rxmode.mq_mode`` = ``ETH_MQ_RX_VMDQ_FLAG``.
+* **[uses] user config**: ``dev_conf.rxmode.mq_mode`` = ``RTE_ETH_MQ_RX_VMDQ_FLAG``.
* **[uses] user config**: ``dev_conf.rx_adv_conf.vmdq_dcb_conf``.
* **[uses] user config**: ``dev_conf.rx_adv_conf.vmdq_rx_conf``.
* **[uses] user config**: ``dev_conf.tx_adv_conf.vmdq_dcb_tx_conf``.
@@ -362,7 +362,7 @@ DCB
Supports Data Center Bridging (DCB).
-* **[uses] user config**: ``dev_conf.rxmode.mq_mode`` = ``ETH_MQ_RX_DCB_FLAG``.
+* **[uses] user config**: ``dev_conf.rxmode.mq_mode`` = ``RTE_ETH_MQ_RX_DCB_FLAG``.
* **[uses] user config**: ``dev_conf.rx_adv_conf.vmdq_dcb_conf``.
* **[uses] user config**: ``dev_conf.rx_adv_conf.dcb_rx_conf``.
* **[uses] user config**: ``dev_conf.tx_adv_conf.vmdq_dcb_tx_conf``.
@@ -378,7 +378,7 @@ VLAN filter
Supports filtering of a VLAN Tag identifier.
-* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_VLAN_FILTER``.
+* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_VLAN_FILTER``.
* **[implements] eth_dev_ops**: ``vlan_filter_set``.
* **[related] API**: ``rte_eth_dev_vlan_filter()``.
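For instance (a sketch; port 0 and VLAN ID 100 are arbitrary, and
``RTE_ETH_RX_OFFLOAD_VLAN_FILTER`` must have been enabled at configure time)::

    /* admit VLAN 100 on port 0; other VLAN IDs are filtered by HW */
    ret = rte_eth_dev_vlan_filter(0 /* port */, 100 /* vlan_id */, 1 /* on */);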
@@ -416,13 +416,13 @@ Supports inline crypto processing defined by rte_security library to perform cry
operations of security protocol while packet is received in NIC. NIC is not aware
of protocol operations. See Security library and PMD documentation for more details.
-* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_SECURITY``,
-* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_SECURITY``.
+* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_SECURITY``,
+* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_SECURITY``.
* **[uses] mbuf**: ``mbuf.l2_len``.
* **[implements] rte_security_ops**: ``session_create``, ``session_update``,
``session_stats_get``, ``session_destroy``, ``set_pkt_metadata``, ``capabilities_get``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_SECURITY``,
- ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_SECURITY``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_SECURITY``,
+ ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_SECURITY``.
* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_SEC_OFFLOAD``,
``mbuf.ol_flags:PKT_TX_SEC_OFFLOAD``, ``mbuf.ol_flags:PKT_RX_SEC_OFFLOAD_FAILED``.
* **[provides] rte_security_ops, capabilities_get**: ``action: RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO``
@@ -438,14 +438,14 @@ protocol processing for the security protocol (e.g. IPsec, MACSEC) while the
packet is received at NIC. The NIC is capable of understanding the security
protocol operations. See security library and PMD documentation for more details.
-* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_SECURITY``,
-* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_SECURITY``.
+* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_SECURITY``,
+* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_SECURITY``.
* **[uses] mbuf**: ``mbuf.l2_len``.
* **[implements] rte_security_ops**: ``session_create``, ``session_update``,
``session_stats_get``, ``session_destroy``, ``set_pkt_metadata``, ``get_userdata``,
``capabilities_get``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_SECURITY``,
- ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_SECURITY``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_SECURITY``,
+ ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_SECURITY``.
* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_SEC_OFFLOAD``,
``mbuf.ol_flags:PKT_TX_SEC_OFFLOAD``, ``mbuf.ol_flags:PKT_RX_SEC_OFFLOAD_FAILED``.
* **[provides] rte_security_ops, capabilities_get**: ``action: RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL``
@@ -459,7 +459,7 @@ CRC offload
Supports CRC stripping by hardware.
A PMD is assumed to support CRC stripping by default. The PMD should advertise whether it supports keeping the CRC.
-* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_KEEP_CRC``.
+* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_KEEP_CRC``.
.. _nic_features_vlan_offload:
@@ -469,13 +469,13 @@ VLAN offload
Supports VLAN offload to hardware.
-* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_VLAN_STRIP,DEV_RX_OFFLOAD_VLAN_FILTER,DEV_RX_OFFLOAD_VLAN_EXTEND``.
-* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_VLAN_INSERT``.
+* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_VLAN_STRIP,RTE_ETH_RX_OFFLOAD_VLAN_FILTER,RTE_ETH_RX_OFFLOAD_VLAN_EXTEND``.
+* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_VLAN_INSERT``.
* **[uses] mbuf**: ``mbuf.ol_flags:PKT_TX_VLAN``, ``mbuf.vlan_tci``.
* **[implements] eth_dev_ops**: ``vlan_offload_set``.
* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_VLAN_STRIPPED``, ``mbuf.ol_flags:PKT_RX_VLAN`` ``mbuf.vlan_tci``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_VLAN_STRIP``,
- ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_VLAN_INSERT``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_VLAN_STRIP``,
+ ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_VLAN_INSERT``.
* **[related] API**: ``rte_eth_dev_set_vlan_offload()``,
``rte_eth_dev_get_vlan_offload()``.
@@ -487,14 +487,14 @@ QinQ offload
Supports QinQ (queue in queue) offload.
-* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_QINQ_STRIP``.
-* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_QINQ_INSERT``.
+* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_QINQ_STRIP``.
+* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_QINQ_INSERT``.
* **[uses] mbuf**: ``mbuf.ol_flags:PKT_TX_QINQ``, ``mbuf.vlan_tci_outer``.
* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_QINQ_STRIPPED``, ``mbuf.ol_flags:PKT_RX_QINQ``,
``mbuf.ol_flags:PKT_RX_VLAN_STRIPPED``, ``mbuf.ol_flags:PKT_RX_VLAN``
``mbuf.vlan_tci``, ``mbuf.vlan_tci_outer``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_QINQ_STRIP``,
- ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_QINQ_INSERT``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_QINQ_STRIP``,
+ ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_QINQ_INSERT``.
.. _nic_features_fec:
@@ -508,7 +508,7 @@ information to correct the bit errors generated during data packet transmission
improves signal quality but also brings a delay to signals. This function can be enabled or disabled as required.
* **[implements] eth_dev_ops**: ``fec_get_capability``, ``fec_get``, ``fec_set``.
-* **[provides] rte_eth_fec_capa**: ``speed:ETH_SPEED_NUM_*``, ``capa:RTE_ETH_FEC_MODE_TO_CAPA()``.
+* **[provides] rte_eth_fec_capa**: ``speed:RTE_ETH_SPEED_NUM_*``, ``capa:RTE_ETH_FEC_MODE_TO_CAPA()``.
* **[related] API**: ``rte_eth_fec_get_capability()``, ``rte_eth_fec_get()``, ``rte_eth_fec_set()``.
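For example, requesting RS-FEC could look as follows (a sketch; a real
application should first query ``rte_eth_fec_get_capability()`` for the modes
valid at the current link speed)::

    uint32_t fec_capa = RTE_ETH_FEC_MODE_TO_CAPA(RTE_ETH_FEC_RS);

    ret = rte_eth_fec_set(port_id, fec_capa);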
@@ -519,16 +519,16 @@ L3 checksum offload
Supports L3 checksum offload.
-* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_IPV4_CKSUM``.
-* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_IPV4_CKSUM``.
+* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_IPV4_CKSUM``.
+* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_IPV4_CKSUM``.
* **[uses] mbuf**: ``mbuf.ol_flags:PKT_TX_IP_CKSUM``,
``mbuf.ol_flags:PKT_TX_IPV4`` | ``PKT_TX_IPV6``.
* **[uses] mbuf**: ``mbuf.l2_len``, ``mbuf.l3_len``.
* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_IP_CKSUM_UNKNOWN`` |
``PKT_RX_IP_CKSUM_BAD`` | ``PKT_RX_IP_CKSUM_GOOD`` |
``PKT_RX_IP_CKSUM_NONE``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_IPV4_CKSUM``,
- ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_IPV4_CKSUM``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_IPV4_CKSUM``,
+ ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_IPV4_CKSUM``.
.. _nic_features_l4_checksum_offload:
@@ -538,8 +538,8 @@ L4 checksum offload
Supports L4 checksum offload.
-* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_UDP_CKSUM,DEV_RX_OFFLOAD_TCP_CKSUM,DEV_RX_OFFLOAD_SCTP_CKSUM``.
-* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_UDP_CKSUM,DEV_TX_OFFLOAD_TCP_CKSUM,DEV_TX_OFFLOAD_SCTP_CKSUM``.
+* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_UDP_CKSUM,RTE_ETH_RX_OFFLOAD_TCP_CKSUM,RTE_ETH_RX_OFFLOAD_SCTP_CKSUM``.
+* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_UDP_CKSUM,RTE_ETH_TX_OFFLOAD_TCP_CKSUM,RTE_ETH_TX_OFFLOAD_SCTP_CKSUM``.
* **[uses] mbuf**: ``mbuf.ol_flags:PKT_TX_IPV4`` | ``PKT_TX_IPV6``,
``mbuf.ol_flags:PKT_TX_L4_NO_CKSUM`` | ``PKT_TX_TCP_CKSUM`` |
``PKT_TX_SCTP_CKSUM`` | ``PKT_TX_UDP_CKSUM``.
@@ -547,8 +547,8 @@ Supports L4 checksum offload.
* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_L4_CKSUM_UNKNOWN`` |
``PKT_RX_L4_CKSUM_BAD`` | ``PKT_RX_L4_CKSUM_GOOD`` |
``PKT_RX_L4_CKSUM_NONE``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_UDP_CKSUM,DEV_RX_OFFLOAD_TCP_CKSUM,DEV_RX_OFFLOAD_SCTP_CKSUM``,
- ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_UDP_CKSUM,DEV_TX_OFFLOAD_TCP_CKSUM,DEV_TX_OFFLOAD_SCTP_CKSUM``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_UDP_CKSUM,RTE_ETH_RX_OFFLOAD_TCP_CKSUM,RTE_ETH_RX_OFFLOAD_SCTP_CKSUM``,
+ ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_UDP_CKSUM,RTE_ETH_TX_OFFLOAD_TCP_CKSUM,RTE_ETH_TX_OFFLOAD_SCTP_CKSUM``.
.. _nic_features_hw_timestamp:
@@ -557,10 +557,10 @@ Timestamp offload
Supports Timestamp.
-* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_TIMESTAMP``.
+* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_TIMESTAMP``.
* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_TIMESTAMP``.
* **[provides] mbuf**: ``mbuf.timestamp``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa: DEV_RX_OFFLOAD_TIMESTAMP``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa: RTE_ETH_RX_OFFLOAD_TIMESTAMP``.
* **[related] eth_dev_ops**: ``read_clock``.
.. _nic_features_macsec_offload:
@@ -570,11 +570,11 @@ MACsec offload
Supports MACsec.
-* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_MACSEC_STRIP``.
-* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_MACSEC_INSERT``.
+* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_MACSEC_STRIP``.
+* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_MACSEC_INSERT``.
* **[uses] mbuf**: ``mbuf.ol_flags:PKT_TX_MACSEC``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_MACSEC_STRIP``,
- ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_MACSEC_INSERT``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_MACSEC_STRIP``,
+ ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_MACSEC_INSERT``.
.. _nic_features_inner_l3_checksum:
@@ -584,16 +584,16 @@ Inner L3 checksum
Supports inner packet L3 checksum.
-* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM``.
-* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM``.
+* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM``.
+* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM``.
* **[uses] mbuf**: ``mbuf.ol_flags:PKT_TX_IP_CKSUM``,
``mbuf.ol_flags:PKT_TX_IPV4`` | ``PKT_TX_IPV6``,
``mbuf.ol_flags:PKT_TX_OUTER_IP_CKSUM``,
``mbuf.ol_flags:PKT_TX_OUTER_IPV4`` | ``PKT_TX_OUTER_IPV6``.
* **[uses] mbuf**: ``mbuf.outer_l2_len``, ``mbuf.outer_l3_len``.
* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_OUTER_IP_CKSUM_BAD``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM``,
- ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM``,
+ ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM``.
.. _nic_features_inner_l4_checksum:
@@ -603,15 +603,15 @@ Inner L4 checksum
Supports inner packet L4 checksum.
-* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_OUTER_UDP_CKSUM``.
+* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM``.
* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_OUTER_L4_CKSUM_UNKNOWN`` |
``PKT_RX_OUTER_L4_CKSUM_BAD`` | ``PKT_RX_OUTER_L4_CKSUM_GOOD`` | ``PKT_RX_OUTER_L4_CKSUM_INVALID``.
-* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_OUTER_UDP_CKSUM``.
+* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM``.
* **[uses] mbuf**: ``mbuf.ol_flags:PKT_TX_OUTER_IPV4`` | ``PKT_TX_OUTER_IPV6``,
``mbuf.ol_flags:PKT_TX_OUTER_UDP_CKSUM``.
* **[uses] mbuf**: ``mbuf.outer_l2_len``, ``mbuf.outer_l3_len``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_OUTER_UDP_CKSUM``,
- ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_OUTER_UDP_CKSUM``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM``,
+ ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM``.
.. _nic_features_shared_rx_queue:
@@ -78,11 +78,11 @@ To enable via ``RX_OLFLAGS`` use ``RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE=y``.
To guarantee the constraint, the following capabilities in ``dev_conf.rxmode.offloads``
will be checked:
-* ``DEV_RX_OFFLOAD_VLAN_EXTEND``
+* ``RTE_ETH_RX_OFFLOAD_VLAN_EXTEND``
-* ``DEV_RX_OFFLOAD_CHECKSUM``
+* ``RTE_ETH_RX_OFFLOAD_CHECKSUM``
-* ``DEV_RX_OFFLOAD_HEADER_SPLIT``
+* ``RTE_ETH_RX_OFFLOAD_HEADER_SPLIT``
* ``fdir_conf->mode``
@@ -216,21 +216,21 @@ For example,
* If the max number of VFs (max_vfs) is set in the range of 1 to 32:
If the number of Rx queues is specified as 4 (``--rxq=4`` in testpmd), then there are 32
- pools (ETH_32_POOLS), and each VF could have 4 Rx queues;
+ pools (RTE_ETH_32_POOLS), and each VF could have 4 Rx queues;
If the number of Rx queues is specified as 2 (``--rxq=2`` in testpmd), then there are 32
- pools (ETH_32_POOLS), and each VF could have 2 Rx queues;
+ pools (RTE_ETH_32_POOLS), and each VF could have 2 Rx queues;
* If the max number of VFs (max_vfs) is in the range of 33 to 64:
If the number of Rx queues is specified as 4 (``--rxq=4`` in testpmd), then an error message is expected,
as ``rxq`` is not valid in this case;
- If the number of rxq is 2 (``--rxq=2`` in testpmd), then there is totally 64 pools (ETH_64_POOLS),
+ If the number of rxq is 2 (``--rxq=2`` in testpmd), then there are 64 pools in total (RTE_ETH_64_POOLS),
and each VF has 2 Rx queues;
- On host, to enable VF RSS functionality, rx mq mode should be set as ETH_MQ_RX_VMDQ_RSS
- or ETH_MQ_RX_RSS mode, and SRIOV mode should be activated (max_vfs >= 1).
+ On the host, to enable VF RSS functionality, the Rx mq mode should be set to RTE_ETH_MQ_RX_VMDQ_RSS
+ or RTE_ETH_MQ_RX_RSS, and SRIOV mode should be activated (max_vfs >= 1).
It is also necessary to configure VF RSS information such as the hash function, RSS key and RSS key length.
.. note::
@@ -89,13 +89,13 @@ Other features are supported using optional MACRO configuration. They include:
To guarantee the constraint, capabilities in dev_conf.rxmode.offloads will be checked:
-* DEV_RX_OFFLOAD_VLAN_STRIP
+* RTE_ETH_RX_OFFLOAD_VLAN_STRIP
-* DEV_RX_OFFLOAD_VLAN_EXTEND
+* RTE_ETH_RX_OFFLOAD_VLAN_EXTEND
-* DEV_RX_OFFLOAD_CHECKSUM
+* RTE_ETH_RX_OFFLOAD_CHECKSUM
-* DEV_RX_OFFLOAD_HEADER_SPLIT
+* RTE_ETH_RX_OFFLOAD_HEADER_SPLIT
* dev_conf
@@ -163,13 +163,13 @@ l3fwd
~~~~~
When running l3fwd with vPMD, there is one thing to note.
-In the configuration, ensure that DEV_RX_OFFLOAD_CHECKSUM in port_conf.rxmode.offloads is NOT set.
+In the configuration, ensure that RTE_ETH_RX_OFFLOAD_CHECKSUM in port_conf.rxmode.offloads is NOT set.
Otherwise, by default, RX vPMD is disabled.
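In code this amounts to clearing the bit before ``rte_eth_dev_configure()``,
e.g. (a sketch against a typical ``port_conf`` variable)::

    port_conf.rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_CHECKSUM;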
load_balancer
~~~~~~~~~~~~~
-As in the case of l3fwd, to enable vPMD, do NOT set DEV_RX_OFFLOAD_CHECKSUM in port_conf.rxmode.offloads.
+As in the case of l3fwd, to enable vPMD, do NOT set RTE_ETH_RX_OFFLOAD_CHECKSUM in port_conf.rxmode.offloads.
In addition, for improved performance, use -bsz "(32,32),(64,64),(32,32)" in load_balancer to avoid using the default burst size of 144.
@@ -371,7 +371,7 @@ Limitations
- CRC:
- - ``DEV_RX_OFFLOAD_KEEP_CRC`` cannot be supported with decapsulation
+ - ``RTE_ETH_RX_OFFLOAD_KEEP_CRC`` cannot be supported with decapsulation
for some NICs (such as ConnectX-6 Dx, ConnectX-6 Lx, and BlueField-2).
The capability bit ``scatter_fcs_w_decap_disable`` shows NIC support.
@@ -611,7 +611,7 @@ Driver options
small-packet traffic.
When MPRQ is enabled, MTU can be larger than the size of
- user-provided mbuf even if DEV_RX_OFFLOAD_SCATTER isn't enabled. PMD will
+ user-provided mbuf even if RTE_ETH_RX_OFFLOAD_SCATTER isn't enabled. The PMD will
configure a stride size large enough to accommodate the MTU as long as the
device allows. Note that this can waste system memory compared to enabling Rx
scatter and multi-segment packet.
@@ -275,7 +275,7 @@ An example utility for eBPF instruction generation in the format of C arrays wil
be added in a future release.
TAP reports the supported RSS functions as part of the dev_infos_get callback:
-``ETH_RSS_IP``, ``ETH_RSS_UDP`` and ``ETH_RSS_TCP``.
+``RTE_ETH_RSS_IP``, ``RTE_ETH_RSS_UDP`` and ``RTE_ETH_RSS_TCP``.
**Known limitation:** TAP supports all of the above hash functions together
and not in partial combinations.
@@ -194,11 +194,11 @@ To segment an outgoing packet, an application must:
- the bit mask of required GSO types. The GSO library uses the same macros as
those that describe a physical device's TX offloading capabilities (i.e.
- ``DEV_TX_OFFLOAD_*_TSO``) for gso_types. For example, if an application
+ ``RTE_ETH_TX_OFFLOAD_*_TSO``) for gso_types. For example, if an application
wants to segment TCP/IPv4 packets, it should set gso_types to
- ``DEV_TX_OFFLOAD_TCP_TSO``. The only other supported values currently
- supported for gso_types are ``DEV_TX_OFFLOAD_VXLAN_TNL_TSO``, and
- ``DEV_TX_OFFLOAD_GRE_TNL_TSO``; a combination of these macros is also
+ ``RTE_ETH_TX_OFFLOAD_TCP_TSO``. The only other values currently
+ supported for gso_types are ``RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO`` and
+ ``RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO``; a combination of these macros is also
allowed.
- a flag, that indicates whether the IPv4 headers of output segments should
@@ -137,7 +137,7 @@ a vxlan-encapsulated tcp packet:
mb->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CSUM
set out_ip checksum to 0 in the packet
- This is supported on hardware advertising DEV_TX_OFFLOAD_IPV4_CKSUM.
+ This is supported on hardware advertising RTE_ETH_TX_OFFLOAD_IPV4_CKSUM.
- calculate checksum of out_ip and out_udp::
@@ -147,8 +147,8 @@ a vxlan-encapsulated tcp packet:
set out_ip checksum to 0 in the packet
set out_udp checksum to pseudo header using rte_ipv4_phdr_cksum()
- This is supported on hardware advertising DEV_TX_OFFLOAD_IPV4_CKSUM
- and DEV_TX_OFFLOAD_UDP_CKSUM.
+ This is supported on hardware advertising RTE_ETH_TX_OFFLOAD_IPV4_CKSUM
+ and RTE_ETH_TX_OFFLOAD_UDP_CKSUM.
- calculate checksum of in_ip::
@@ -158,7 +158,7 @@ a vxlan-encapsulated tcp packet:
set in_ip checksum to 0 in the packet
This is similar to case 1), but l2_len is different. It is supported
- on hardware advertising DEV_TX_OFFLOAD_IPV4_CKSUM.
+ on hardware advertising RTE_ETH_TX_OFFLOAD_IPV4_CKSUM.
Note that it can only work if outer L4 checksum is 0.
- calculate checksum of in_ip and in_tcp::
@@ -170,8 +170,8 @@ a vxlan-encapsulated tcp packet:
set in_tcp checksum to pseudo header using rte_ipv4_phdr_cksum()
This is similar to case 2), but l2_len is different. It is supported
- on hardware advertising DEV_TX_OFFLOAD_IPV4_CKSUM and
- DEV_TX_OFFLOAD_TCP_CKSUM.
+ on hardware advertising RTE_ETH_TX_OFFLOAD_IPV4_CKSUM and
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM.
Note that it can only work if outer L4 checksum is 0.
- segment inner TCP::
@@ -185,7 +185,7 @@ a vxlan-encapsulated tcp packet:
set in_tcp checksum to pseudo header without including the IP
payload length using rte_ipv4_phdr_cksum()
- This is supported on hardware advertising DEV_TX_OFFLOAD_TCP_TSO.
+ This is supported on hardware advertising RTE_ETH_TX_OFFLOAD_TCP_TSO.
Note that it can only work if outer L4 checksum is 0.
- calculate checksum of out_ip, in_ip, in_tcp::
@@ -200,8 +200,8 @@ a vxlan-encapsulated tcp packet:
set in_ip checksum to 0 in the packet
set in_tcp checksum to pseudo header using rte_ipv4_phdr_cksum()
- This is supported on hardware advertising DEV_TX_OFFLOAD_IPV4_CKSUM,
- DEV_TX_OFFLOAD_UDP_CKSUM and DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM.
+ This is supported on hardware advertising RTE_ETH_TX_OFFLOAD_IPV4_CKSUM,
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM and RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM.
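A possible C rendering of this last case (a sketch: ``mb``, ``out_ip``,
``in_ip`` and ``in_tcp`` are assumed to already point at the mbuf and at the
respective headers inside it)::

    mb->outer_l2_len = sizeof(struct rte_ether_hdr);
    mb->outer_l3_len = sizeof(struct rte_ipv4_hdr);
    mb->l2_len = sizeof(struct rte_udp_hdr) + sizeof(struct rte_vxlan_hdr) +
                 sizeof(struct rte_ether_hdr);
    mb->l3_len = sizeof(struct rte_ipv4_hdr);
    mb->ol_flags |= PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IP_CKSUM |
                    PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
    out_ip->hdr_checksum = 0;
    in_ip->hdr_checksum = 0;
    in_tcp->cksum = rte_ipv4_phdr_cksum(in_ip, mb->ol_flags);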
The list of flags and their precise meaning is described in the mbuf API
documentation (rte_mbuf.h). Also refer to the testpmd source code
@@ -57,7 +57,7 @@ Whenever needed and appropriate, asynchronous communication should be introduced
Avoiding lock contention is a key issue in a multi-core environment.
To address this issue, PMDs are designed to work with per-core private resources as much as possible.
-For example, a PMD maintains a separate transmit queue per-core, per-port, if the PMD is not ``DEV_TX_OFFLOAD_MT_LOCKFREE`` capable.
+For example, a PMD maintains a separate transmit queue per-core, per-port, if the PMD is not ``RTE_ETH_TX_OFFLOAD_MT_LOCKFREE`` capable.
In the same way, every receive queue of a port is assigned to and polled by a single logical core (lcore).
To comply with Non-Uniform Memory Access (NUMA), memory management is designed to assign to each logical core
@@ -119,7 +119,7 @@ This is also true for the pipe-line model provided all logical cores used are lo
Multiple logical cores should never share receive or transmit queues for interfaces since this would require global locks and hinder performance.
-If the PMD is ``DEV_TX_OFFLOAD_MT_LOCKFREE`` capable, multiple threads can invoke ``rte_eth_tx_burst()``
+If the PMD is ``RTE_ETH_TX_OFFLOAD_MT_LOCKFREE`` capable, multiple threads can invoke ``rte_eth_tx_burst()``
concurrently on the same Tx queue without a SW lock. This PMD feature is found in some NICs and is useful in the following use cases:
* Remove explicit spinlock in some applications where lcores are not mapped to Tx queues with 1:1 relation.
@@ -127,7 +127,7 @@ concurrently on the same tx queue without SW lock. This PMD feature found in som
* In the eventdev use case, avoid dedicating a separate TX core for transmitting, thus
enabling more scaling as all workers can send the packets.
-See `Hardware Offload`_ for ``DEV_TX_OFFLOAD_MT_LOCKFREE`` capability probing details.
+See `Hardware Offload`_ for ``RTE_ETH_TX_OFFLOAD_MT_LOCKFREE`` capability probing details.
Device Identification, Ownership and Configuration
--------------------------------------------------
@@ -311,7 +311,7 @@ The ``dev_info->[rt]x_queue_offload_capa`` returned from ``rte_eth_dev_info_get(
The ``dev_info->[rt]x_offload_capa`` returned from ``rte_eth_dev_info_get()`` includes all pure per-port and per-queue offloading capabilities.
Supported offloads can be either per-port or per-queue.
-Offloads are enabled using the existing ``DEV_TX_OFFLOAD_*`` or ``DEV_RX_OFFLOAD_*`` flags.
+Offloads are enabled using the existing ``RTE_ETH_TX_OFFLOAD_*`` or ``RTE_ETH_RX_OFFLOAD_*`` flags.
Any requested offloading by an application must be within the device capabilities.
Any offloading is disabled by default if it is not set in the parameter
``dev_conf->[rt]xmode.offloads`` to ``rte_eth_dev_configure()`` and
@@ -1993,23 +1993,23 @@ only matching traffic goes through.
.. table:: RSS
- +---------------+---------------------------------------------+
- | Field | Value |
- +===============+=============================================+
- | ``func`` | RSS hash function to apply |
- +---------------+---------------------------------------------+
- | ``level`` | encapsulation level for ``types`` |
- +---------------+---------------------------------------------+
- | ``types`` | specific RSS hash types (see ``ETH_RSS_*``) |
- +---------------+---------------------------------------------+
- | ``key_len`` | hash key length in bytes |
- +---------------+---------------------------------------------+
- | ``queue_num`` | number of entries in ``queue`` |
- +---------------+---------------------------------------------+
- | ``key`` | hash key |
- +---------------+---------------------------------------------+
- | ``queue`` | queue indices to use |
- +---------------+---------------------------------------------+
+ +---------------+-------------------------------------------------+
+ | Field | Value |
+ +===============+=================================================+
+ | ``func`` | RSS hash function to apply |
+ +---------------+-------------------------------------------------+
+ | ``level`` | encapsulation level for ``types`` |
+ +---------------+-------------------------------------------------+
+ | ``types`` | specific RSS hash types (see ``RTE_ETH_RSS_*``) |
+ +---------------+-------------------------------------------------+
+ | ``key_len`` | hash key length in bytes |
+ +---------------+-------------------------------------------------+
+ | ``queue_num`` | number of entries in ``queue`` |
+ +---------------+-------------------------------------------------+
+ | ``key`` | hash key |
+ +---------------+-------------------------------------------------+
+ | ``queue`` | queue indices to use |
+ +---------------+-------------------------------------------------+
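As an illustration, an RSS action spreading matched traffic over two queues
and hashing on IP addresses only might be built as follows (a sketch; the
queue indices are arbitrary)::

    static const uint16_t rss_queues[] = { 0, 1 };
    struct rte_flow_action_rss rss = {
            .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
            .level = 0,                 /* hash on the outermost headers */
            .types = RTE_ETH_RSS_IP,
            .key_len = 0,               /* keep the device default key */
            .key = NULL,
            .queue_num = RTE_DIM(rss_queues),
            .queue = rss_queues,
    };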
Action: ``PF``
^^^^^^^^^^^^^^
@@ -569,7 +569,7 @@ created by the application is attached to the security session by the API
For Inline Crypto and Inline Protocol offload, device-specific metadata is
updated in the mbuf using ``rte_security_set_pkt_metadata()`` if
-``DEV_TX_OFFLOAD_SEC_NEED_MDATA`` is set.
+``RTE_ETH_TX_OFFLOAD_SEC_NEED_MDATA`` is set.
For inline protocol offloaded ingress traffic, the application can register a
pointer, ``userdata`` , in the security session. When the packet is received,
@@ -69,22 +69,16 @@ Deprecation Notices
``RTE_ETH_FLOW_MAX`` is one example of the mentioned case: adding a new flow
type will break the ABI because of ``flex_mask[RTE_ETH_FLOW_MAX]`` array
usage in following public struct hierarchy:
- ``rte_eth_fdir_flex_conf -> rte_fdir_conf -> rte_eth_conf (in the middle)``.
+ ``rte_eth_fdir_flex_conf -> rte_eth_fdir_conf -> rte_eth_conf (in the middle)``.
This kind of usage needs to be identified and fixed in 20.11; otherwise it blocks
us from extending the existing enums/defines.
One solution is to use a fixed-size array instead of a ``.*MAX.*`` value.
-* ethdev: Will add ``RTE_ETH_`` prefix to all ethdev macros/enums in v21.11.
- Macros will be added for backward compatibility.
- Backward compatibility macros will be removed on v22.11.
- A few old backward compatibility macros from 2013 that does not have
- proper prefix will be removed on v21.11.
-
* ethdev: The flow director API, including ``rte_eth_conf.fdir_conf`` field,
and the related structures (``rte_fdir_*`` and ``rte_eth_fdir_*``),
will be removed in DPDK 20.11.
-* ethdev: New offload flags ``DEV_RX_OFFLOAD_FLOW_MARK`` will be added in 19.11.
+* ethdev: New offload flags ``RTE_ETH_RX_OFFLOAD_FLOW_MARK`` will be added in 19.11.
This will allow application to enable or disable PMDs from updating
``rte_mbuf::hash::fdir``.
This scheme will allow PMDs to avoid writes to ``rte_mbuf`` fields on Rx and
@@ -446,6 +446,9 @@ ABI Changes
* bbdev: Added capability related to more comprehensive CRC options,
shifting values of the ``enum rte_bbdev_op_ldpcdec_flag_bitmasks``.
+* ethdev: All enums & macros updated to have ``RTE_ETH`` prefix and structures
+ updated to have ``rte_eth`` prefix. DPDK components updated to use new names.
+
Known Issues
------------
@@ -209,12 +209,12 @@ Where:
device will ensure the ordering. Ordering will be lost when tried in PARALLEL.
* ``--rxoffload MASK``: RX HW offload capabilities to enable/use on this port
- (bitmask of DEV_RX_OFFLOAD_* values). It is an optional parameter and
+ (bitmask of RTE_ETH_RX_OFFLOAD_* values). It is an optional parameter and
allows the user to disable some of the RX HW offload capabilities.
By default all HW RX offloads are enabled.
* ``--txoffload MASK``: TX HW offload capabilities to enable/use on this port
- (bitmask of DEV_TX_OFFLOAD_* values). It is an optional parameter and
+ (bitmask of RTE_ETH_TX_OFFLOAD_* values). It is an optional parameter and
allows the user to disable some of the TX HW offload capabilities.
By default all HW TX offloads are enabled.
@@ -546,7 +546,7 @@ The command line options are:
Set the hexadecimal bitmask of the RX multi-queue modes which can be enabled.
The default value is 0x7::
- ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_DCB_FLAG | ETH_MQ_RX_VMDQ_FLAG
+ RTE_ETH_MQ_RX_RSS_FLAG | RTE_ETH_MQ_RX_DCB_FLAG | RTE_ETH_MQ_RX_VMDQ_FLAG
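For example, to allow only RSS (an illustrative invocation; the EAL arguments
vary per system)::

    dpdk-testpmd -l 0-3 -n 4 -- -i --rx-mq-mode 0x1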
* ``--record-core-cycles``
@@ -90,20 +90,20 @@ int dpaa_intr_disable(char *if_name);
struct usdpaa_ioctl_link_status_args_old {
/* network device node name */
char if_name[IF_NAME_MAX_LEN];
- /* link status(ETH_LINK_UP/DOWN) */
+ /* link status(RTE_ETH_LINK_UP/DOWN) */
int link_status;
};
struct usdpaa_ioctl_link_status_args {
/* network device node name */
char if_name[IF_NAME_MAX_LEN];
- /* link status(ETH_LINK_UP/DOWN) */
+ /* link status(RTE_ETH_LINK_UP/DOWN) */
int link_status;
- /* link speed (ETH_SPEED_NUM_)*/
+ /* link speed (RTE_ETH_SPEED_NUM_)*/
int link_speed;
- /* link duplex (ETH_LINK_[HALF/FULL]_DUPLEX)*/
+ /* link duplex (RTE_ETH_LINK_[HALF/FULL]_DUPLEX)*/
int link_duplex;
- /* link autoneg (ETH_LINK_AUTONEG/FIXED)*/
+ /* link autoneg (RTE_ETH_LINK_AUTONEG/FIXED)*/
int link_autoneg;
};
@@ -111,16 +111,16 @@ struct usdpaa_ioctl_link_status_args {
struct usdpaa_ioctl_update_link_status_args {
/* network device node name */
char if_name[IF_NAME_MAX_LEN];
- /* link status(ETH_LINK_UP/DOWN) */
+ /* link status(RTE_ETH_LINK_UP/DOWN) */
int link_status;
};
struct usdpaa_ioctl_update_link_speed {
/* network device node name*/
char if_name[IF_NAME_MAX_LEN];
- /* link speed (ETH_SPEED_NUM_)*/
+ /* link speed (RTE_ETH_SPEED_NUM_)*/
int link_speed;
- /* link duplex (ETH_LINK_[HALF/FULL]_DUPLEX)*/
+ /* link duplex (RTE_ETH_LINK_[HALF/FULL]_DUPLEX)*/
int link_duplex;
};
@@ -167,7 +167,7 @@ enum roc_npc_rss_hash_function {
struct roc_npc_action_rss {
enum roc_npc_rss_hash_function func;
uint32_t level;
- uint64_t types; /**< Specific RSS hash types (see ETH_RSS_*). */
+ uint64_t types; /**< Specific RSS hash types (see RTE_ETH_RSS_*). */
uint32_t key_len; /**< Hash key length in bytes. */
uint32_t queue_num; /**< Number of entries in @p queue. */
const uint8_t *key; /**< Hash key. */
@@ -93,10 +93,10 @@ static const char *valid_arguments[] = {
};
static struct rte_eth_link pmd_link = {
- .link_speed = ETH_SPEED_NUM_10G,
- .link_duplex = ETH_LINK_FULL_DUPLEX,
- .link_status = ETH_LINK_DOWN,
- .link_autoneg = ETH_LINK_FIXED,
+ .link_speed = RTE_ETH_SPEED_NUM_10G,
+ .link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+ .link_status = RTE_ETH_LINK_DOWN,
+ .link_autoneg = RTE_ETH_LINK_FIXED,
};
RTE_LOG_REGISTER_DEFAULT(af_packet_logtype, NOTICE);
@@ -290,7 +290,7 @@ eth_af_packet_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
static int
eth_dev_start(struct rte_eth_dev *dev)
{
- dev->data->dev_link.link_status = ETH_LINK_UP;
+ dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
return 0;
}
@@ -320,7 +320,7 @@ eth_dev_stop(struct rte_eth_dev *dev)
internals->tx_queue[i].sockfd = -1;
}
- dev->data->dev_link.link_status = ETH_LINK_DOWN;
+ dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
return 0;
}
@@ -331,7 +331,7 @@ eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
const struct rte_eth_rxmode *rxmode = &dev_conf->rxmode;
struct pmd_internals *internals = dev->data->dev_private;
- internals->vlan_strip = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+ internals->vlan_strip = !!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
return 0;
}
@@ -346,9 +346,9 @@ eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->max_rx_queues = (uint16_t)internals->nb_queues;
dev_info->max_tx_queues = (uint16_t)internals->nb_queues;
dev_info->min_rx_bufsize = 0;
- dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS |
- DEV_TX_OFFLOAD_VLAN_INSERT;
- dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
+ dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+ RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
+ dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
return 0;
}
@@ -163,10 +163,10 @@ static const char * const valid_arguments[] = {
};
static const struct rte_eth_link pmd_link = {
- .link_speed = ETH_SPEED_NUM_10G,
- .link_duplex = ETH_LINK_FULL_DUPLEX,
- .link_status = ETH_LINK_DOWN,
- .link_autoneg = ETH_LINK_AUTONEG
+ .link_speed = RTE_ETH_SPEED_NUM_10G,
+ .link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+ .link_status = RTE_ETH_LINK_DOWN,
+ .link_autoneg = RTE_ETH_LINK_AUTONEG
};
/* List which tracks PMDs to facilitate sharing UMEMs across them. */
@@ -652,7 +652,7 @@ eth_af_xdp_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
static int
eth_dev_start(struct rte_eth_dev *dev)
{
- dev->data->dev_link.link_status = ETH_LINK_UP;
+ dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
return 0;
}
@@ -661,7 +661,7 @@ eth_dev_start(struct rte_eth_dev *dev)
static int
eth_dev_stop(struct rte_eth_dev *dev)
{
- dev->data->dev_link.link_status = ETH_LINK_DOWN;
+ dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
return 0;
}
@@ -736,14 +736,14 @@ eth_ark_dev_info_get(struct rte_eth_dev *dev,
.nb_align = ARK_TX_MIN_QUEUE}; /* power of 2 */
/* ARK PMD supports all line rates, how do we indicate that here ?? */
- dev_info->speed_capa = (ETH_LINK_SPEED_1G |
- ETH_LINK_SPEED_10G |
- ETH_LINK_SPEED_25G |
- ETH_LINK_SPEED_40G |
- ETH_LINK_SPEED_50G |
- ETH_LINK_SPEED_100G);
-
- dev_info->rx_offload_capa = DEV_RX_OFFLOAD_TIMESTAMP;
+ dev_info->speed_capa = (RTE_ETH_LINK_SPEED_1G |
+ RTE_ETH_LINK_SPEED_10G |
+ RTE_ETH_LINK_SPEED_25G |
+ RTE_ETH_LINK_SPEED_40G |
+ RTE_ETH_LINK_SPEED_50G |
+ RTE_ETH_LINK_SPEED_100G);
+
+ dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_TIMESTAMP;
return 0;
}
@@ -154,20 +154,20 @@ static struct rte_pci_driver rte_atl_pmd = {
.remove = eth_atl_pci_remove,
};
-#define ATL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP \
- | DEV_RX_OFFLOAD_IPV4_CKSUM \
- | DEV_RX_OFFLOAD_UDP_CKSUM \
- | DEV_RX_OFFLOAD_TCP_CKSUM \
- | DEV_RX_OFFLOAD_MACSEC_STRIP \
- | DEV_RX_OFFLOAD_VLAN_FILTER)
-
-#define ATL_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT \
- | DEV_TX_OFFLOAD_IPV4_CKSUM \
- | DEV_TX_OFFLOAD_UDP_CKSUM \
- | DEV_TX_OFFLOAD_TCP_CKSUM \
- | DEV_TX_OFFLOAD_TCP_TSO \
- | DEV_TX_OFFLOAD_MACSEC_INSERT \
- | DEV_TX_OFFLOAD_MULTI_SEGS)
+#define ATL_RX_OFFLOADS (RTE_ETH_RX_OFFLOAD_VLAN_STRIP \
+ | RTE_ETH_RX_OFFLOAD_IPV4_CKSUM \
+ | RTE_ETH_RX_OFFLOAD_UDP_CKSUM \
+ | RTE_ETH_RX_OFFLOAD_TCP_CKSUM \
+ | RTE_ETH_RX_OFFLOAD_MACSEC_STRIP \
+ | RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
+
+#define ATL_TX_OFFLOADS (RTE_ETH_TX_OFFLOAD_VLAN_INSERT \
+ | RTE_ETH_TX_OFFLOAD_IPV4_CKSUM \
+ | RTE_ETH_TX_OFFLOAD_UDP_CKSUM \
+ | RTE_ETH_TX_OFFLOAD_TCP_CKSUM \
+ | RTE_ETH_TX_OFFLOAD_TCP_TSO \
+ | RTE_ETH_TX_OFFLOAD_MACSEC_INSERT \
+ | RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
#define SFP_EEPROM_SIZE 0x100
@@ -488,7 +488,7 @@ atl_dev_start(struct rte_eth_dev *dev)
/* set adapter started */
hw->adapter_stopped = 0;
- if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
+ if (dev->data->dev_conf.link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
PMD_INIT_LOG(ERR,
"Invalid link_speeds for port %u, fix speed not supported",
dev->data->port_id);
@@ -655,18 +655,18 @@ atl_dev_set_link_up(struct rte_eth_dev *dev)
uint32_t link_speeds = dev->data->dev_conf.link_speeds;
uint32_t speed_mask = 0;
- if (link_speeds == ETH_LINK_SPEED_AUTONEG) {
+ if (link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
speed_mask = hw->aq_nic_cfg->link_speed_msk;
} else {
- if (link_speeds & ETH_LINK_SPEED_10G)
+ if (link_speeds & RTE_ETH_LINK_SPEED_10G)
speed_mask |= AQ_NIC_RATE_10G;
- if (link_speeds & ETH_LINK_SPEED_5G)
+ if (link_speeds & RTE_ETH_LINK_SPEED_5G)
speed_mask |= AQ_NIC_RATE_5G;
- if (link_speeds & ETH_LINK_SPEED_1G)
+ if (link_speeds & RTE_ETH_LINK_SPEED_1G)
speed_mask |= AQ_NIC_RATE_1G;
- if (link_speeds & ETH_LINK_SPEED_2_5G)
+ if (link_speeds & RTE_ETH_LINK_SPEED_2_5G)
speed_mask |= AQ_NIC_RATE_2G5;
- if (link_speeds & ETH_LINK_SPEED_100M)
+ if (link_speeds & RTE_ETH_LINK_SPEED_100M)
speed_mask |= AQ_NIC_RATE_100M;
}
@@ -1127,10 +1127,10 @@ atl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->reta_size = HW_ATL_B0_RSS_REDIRECTION_MAX;
dev_info->flow_type_rss_offloads = ATL_RSS_OFFLOAD_ALL;
- dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
- dev_info->speed_capa |= ETH_LINK_SPEED_100M;
- dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
- dev_info->speed_capa |= ETH_LINK_SPEED_5G;
+ dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G;
+ dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100M;
+ dev_info->speed_capa |= RTE_ETH_LINK_SPEED_2_5G;
+ dev_info->speed_capa |= RTE_ETH_LINK_SPEED_5G;
return 0;
}
@@ -1175,10 +1175,10 @@ atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
u32 fc = AQ_NIC_FC_OFF;
int err = 0;
- link.link_status = ETH_LINK_DOWN;
+ link.link_status = RTE_ETH_LINK_DOWN;
link.link_speed = 0;
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
- link.link_autoneg = hw->is_autoneg ? ETH_LINK_AUTONEG : ETH_LINK_FIXED;
+ link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+ link.link_autoneg = hw->is_autoneg ? RTE_ETH_LINK_AUTONEG : RTE_ETH_LINK_FIXED;
memset(&old, 0, sizeof(old));
/* load old link status */
@@ -1198,8 +1198,8 @@ atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
return 0;
}
- link.link_status = ETH_LINK_UP;
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_status = RTE_ETH_LINK_UP;
+ link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
link.link_speed = hw->aq_link_status.mbps;
rte_eth_linkstatus_set(dev, &link);
@@ -1333,7 +1333,7 @@ atl_dev_link_status_print(struct rte_eth_dev *dev)
PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
(int)(dev->data->port_id),
(unsigned int)link.link_speed,
- link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+ link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
"full-duplex" : "half-duplex");
} else {
PMD_DRV_LOG(INFO, " Port %d: Link Down",
@@ -1532,13 +1532,13 @@ atl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
hw->aq_fw_ops->get_flow_control(hw, &fc);
if (fc == AQ_NIC_FC_OFF)
- fc_conf->mode = RTE_FC_NONE;
+ fc_conf->mode = RTE_ETH_FC_NONE;
else if ((fc & AQ_NIC_FC_RX) && (fc & AQ_NIC_FC_TX))
- fc_conf->mode = RTE_FC_FULL;
+ fc_conf->mode = RTE_ETH_FC_FULL;
else if (fc & AQ_NIC_FC_RX)
- fc_conf->mode = RTE_FC_RX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
else if (fc & AQ_NIC_FC_TX)
- fc_conf->mode = RTE_FC_TX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
return 0;
}
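
The get/set pair is symmetric on the application side. A minimal sketch that switches a port to full flow control, assuming port_id; the other fc_conf fields (high_water, low_water, autoneg) are carried over from the get:

    struct rte_eth_fc_conf fc_conf;
    int ret = rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
    if (ret == 0) {
        fc_conf.mode = RTE_ETH_FC_FULL; /* pause frames in both directions */
        ret = rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
    }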
@@ -1553,13 +1553,13 @@ atl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
if (hw->aq_fw_ops->set_flow_control == NULL)
return -ENOTSUP;
- if (fc_conf->mode == RTE_FC_NONE)
+ if (fc_conf->mode == RTE_ETH_FC_NONE)
hw->aq_nic_cfg->flow_control = AQ_NIC_FC_OFF;
- else if (fc_conf->mode == RTE_FC_RX_PAUSE)
+ else if (fc_conf->mode == RTE_ETH_FC_RX_PAUSE)
hw->aq_nic_cfg->flow_control = AQ_NIC_FC_RX;
- else if (fc_conf->mode == RTE_FC_TX_PAUSE)
+ else if (fc_conf->mode == RTE_ETH_FC_TX_PAUSE)
hw->aq_nic_cfg->flow_control = AQ_NIC_FC_TX;
- else if (fc_conf->mode == RTE_FC_FULL)
+ else if (fc_conf->mode == RTE_ETH_FC_FULL)
hw->aq_nic_cfg->flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
if (old_flow_control != hw->aq_nic_cfg->flow_control)
@@ -1727,14 +1727,14 @@ atl_vlan_offload_set(struct rte_eth_dev *dev, int mask)
PMD_INIT_FUNC_TRACE();
- ret = atl_enable_vlan_filter(dev, mask & ETH_VLAN_FILTER_MASK);
+ ret = atl_enable_vlan_filter(dev, mask & RTE_ETH_VLAN_FILTER_MASK);
- cfg->vlan_strip = !!(mask & ETH_VLAN_STRIP_MASK);
+ cfg->vlan_strip = !!(mask & RTE_ETH_VLAN_STRIP_MASK);
for (i = 0; i < dev->data->nb_rx_queues; i++)
hw_atl_rpo_rx_desc_vlan_stripping_set(hw, cfg->vlan_strip, i);
- if (mask & ETH_VLAN_EXTEND_MASK)
+ if (mask & RTE_ETH_VLAN_EXTEND_MASK)
ret = -ENOTSUP;
return ret;
@@ -1750,10 +1750,10 @@ atl_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
PMD_INIT_FUNC_TRACE();
switch (vlan_type) {
- case ETH_VLAN_TYPE_INNER:
+ case RTE_ETH_VLAN_TYPE_INNER:
hw_atl_rpf_vlan_inner_etht_set(hw, tpid);
break;
- case ETH_VLAN_TYPE_OUTER:
+ case RTE_ETH_VLAN_TYPE_OUTER:
hw_atl_rpf_vlan_outer_etht_set(hw, tpid);
break;
default:
@@ -11,15 +11,15 @@
#include "hw_atl/hw_atl_utils.h"
#define ATL_RSS_OFFLOAD_ALL ( \
- ETH_RSS_IPV4 | \
- ETH_RSS_NONFRAG_IPV4_TCP | \
- ETH_RSS_NONFRAG_IPV4_UDP | \
- ETH_RSS_IPV6 | \
- ETH_RSS_NONFRAG_IPV6_TCP | \
- ETH_RSS_NONFRAG_IPV6_UDP | \
- ETH_RSS_IPV6_EX | \
- ETH_RSS_IPV6_TCP_EX | \
- ETH_RSS_IPV6_UDP_EX)
+ RTE_ETH_RSS_IPV4 | \
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+ RTE_ETH_RSS_IPV6 | \
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+ RTE_ETH_RSS_IPV6_EX | \
+ RTE_ETH_RSS_IPV6_TCP_EX | \
+ RTE_ETH_RSS_IPV6_UDP_EX)
#define ATL_DEV_PRIVATE_TO_HW(adapter) \
(&((struct atl_adapter *)adapter)->hw)
@@ -145,10 +145,10 @@ atl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
rxq->rx_free_thresh = rx_conf->rx_free_thresh;
rxq->l3_csum_enabled = dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_IPV4_CKSUM;
+ RTE_ETH_RX_OFFLOAD_IPV4_CKSUM;
rxq->l4_csum_enabled = dev->data->dev_conf.rxmode.offloads &
- (DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM);
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+ (RTE_ETH_RX_OFFLOAD_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_TCP_CKSUM);
+ if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
PMD_DRV_LOG(ERR, "PMD does not support KEEP_CRC offload");
/* allocate memory for the software ring */
@@ -1998,9 +1998,9 @@ avp_dev_configure(struct rte_eth_dev *eth_dev)
/* Setup required number of queues */
_avp_set_queue_counts(eth_dev);
- mask = (ETH_VLAN_STRIP_MASK |
- ETH_VLAN_FILTER_MASK |
- ETH_VLAN_EXTEND_MASK);
+ mask = (RTE_ETH_VLAN_STRIP_MASK |
+ RTE_ETH_VLAN_FILTER_MASK |
+ RTE_ETH_VLAN_EXTEND_MASK);
ret = avp_vlan_offload_set(eth_dev, mask);
if (ret < 0) {
PMD_DRV_LOG(ERR, "VLAN offload set failed by host, ret=%d\n",
@@ -2140,8 +2140,8 @@ avp_dev_link_update(struct rte_eth_dev *eth_dev,
struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
struct rte_eth_link *link = &eth_dev->data->dev_link;
- link->link_speed = ETH_SPEED_NUM_10G;
- link->link_duplex = ETH_LINK_FULL_DUPLEX;
+ link->link_speed = RTE_ETH_SPEED_NUM_10G;
+ link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
link->link_status = !!(avp->flags & AVP_F_LINKUP);
return -1;
@@ -2191,8 +2191,8 @@ avp_dev_info_get(struct rte_eth_dev *eth_dev,
dev_info->max_rx_pktlen = avp->max_rx_pkt_len;
dev_info->max_mac_addrs = AVP_MAX_MAC_ADDRS;
if (avp->host_features & RTE_AVP_FEATURE_VLAN_OFFLOAD) {
- dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
- dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
+ dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+ dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
}
return 0;
@@ -2205,9 +2205,9 @@ avp_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
struct rte_eth_conf *dev_conf = &eth_dev->data->dev_conf;
uint64_t offloads = dev_conf->rxmode.offloads;
- if (mask & ETH_VLAN_STRIP_MASK) {
+ if (mask & RTE_ETH_VLAN_STRIP_MASK) {
if (avp->host_features & RTE_AVP_FEATURE_VLAN_OFFLOAD) {
- if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
avp->features |= RTE_AVP_FEATURE_VLAN_OFFLOAD;
else
avp->features &= ~RTE_AVP_FEATURE_VLAN_OFFLOAD;
@@ -2216,13 +2216,13 @@ avp_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
}
}
- if (mask & ETH_VLAN_FILTER_MASK) {
- if (offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+ if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+ if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
PMD_DRV_LOG(ERR, "VLAN filter offload not supported\n");
}
- if (mask & ETH_VLAN_EXTEND_MASK) {
- if (offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+ if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+ if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
PMD_DRV_LOG(ERR, "VLAN extend offload not supported\n");
}
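
For reference, the mask bits handled in this callback arrive from rte_eth_dev_set_vlan_offload(), which treats its argument as the desired state of all VLAN offloads. A minimal sketch, assuming port_id:

    /* Enable stripping and filtering; bits left clear are disabled. The
     * ethdev layer updates rxmode.offloads and invokes the PMD callback
     * with RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK set. */
    int ret = rte_eth_dev_set_vlan_offload(port_id,
            RTE_ETH_VLAN_STRIP_OFFLOAD | RTE_ETH_VLAN_FILTER_OFFLOAD);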
@@ -840,11 +840,11 @@ static void axgbe_rss_options(struct axgbe_port *pdata)
pdata->rss_hf = rss_conf->rss_hf;
rss_hf = rss_conf->rss_hf;
- if (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_IPV6))
+ if (rss_hf & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6))
AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
- if (rss_hf & (ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV6_TCP))
+ if (rss_hf & (RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV6_TCP))
AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
- if (rss_hf & (ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP))
+ if (rss_hf & (RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP))
AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
}
@@ -326,7 +326,7 @@ axgbe_dev_configure(struct rte_eth_dev *dev)
struct axgbe_port *pdata = dev->data->dev_private;
/* Checksum offload to hardware */
pdata->rx_csum_enable = dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_CHECKSUM;
+ RTE_ETH_RX_OFFLOAD_CHECKSUM;
return 0;
}
@@ -335,9 +335,9 @@ axgbe_dev_rx_mq_config(struct rte_eth_dev *dev)
{
struct axgbe_port *pdata = dev->data->dev_private;
- if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
+ if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS)
pdata->rss_enable = 1;
- else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
+ else if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_NONE)
pdata->rss_enable = 0;
else
return -1;
@@ -385,7 +385,7 @@ axgbe_dev_start(struct rte_eth_dev *dev)
rte_bit_relaxed_clear32(AXGBE_DOWN, &pdata->dev_state);
max_pkt_len = dev_data->mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
- if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
+ if ((dev_data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ||
max_pkt_len > pdata->rx_buf_size)
dev_data->scattered_rx = 1;
@@ -521,8 +521,8 @@ axgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
}
for (i = 0; i < reta_size; i++) {
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
if ((reta_conf[idx].mask & (1ULL << shift)) == 0)
continue;
pdata->rss_table[i] = reta_conf[idx].reta[shift];
@@ -552,8 +552,8 @@ axgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
}
for (i = 0; i < reta_size; i++) {
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
if ((reta_conf[idx].mask & (1ULL << shift)) == 0)
continue;
reta_conf[idx].reta[shift] = pdata->rss_table[i];
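
The idx/shift arithmetic mirrors the layout of struct rte_eth_rss_reta_entry64: the redirection table travels as groups of RTE_ETH_RETA_GROUP_SIZE (64) entries, each group carrying its own validity mask. A minimal sketch that spreads entries round-robin over the Rx queues, assuming port_id, reta_size (from dev_info.reta_size), nb_queues, and <string.h> for memset:

    struct rte_eth_rss_reta_entry64 reta_conf[512 / RTE_ETH_RETA_GROUP_SIZE];
    memset(reta_conf, 0, sizeof(reta_conf));
    for (uint16_t i = 0; i < reta_size; i++) {
        uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;   /* 64-entry group */
        uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE; /* slot in group */
        reta_conf[idx].mask |= 1ULL << shift;         /* entry is valid */
        reta_conf[idx].reta[shift] = i % nb_queues;   /* target Rx queue */
    }
    int ret = rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);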
@@ -590,13 +590,13 @@ axgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
pdata->rss_hf = rss_conf->rss_hf & AXGBE_RSS_OFFLOAD;
- if (pdata->rss_hf & (ETH_RSS_IPV4 | ETH_RSS_IPV6))
+ if (pdata->rss_hf & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6))
AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
if (pdata->rss_hf &
- (ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV6_TCP))
+ (RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV6_TCP))
AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
if (pdata->rss_hf &
- (ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP))
+ (RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP))
AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
/* Set the RSS options */
@@ -765,7 +765,7 @@ axgbe_dev_link_update(struct rte_eth_dev *dev,
link.link_status = pdata->phy_link;
link.link_speed = pdata->phy_speed;
link.link_autoneg = !(dev->data->dev_conf.link_speeds &
- ETH_LINK_SPEED_FIXED);
+ RTE_ETH_LINK_SPEED_FIXED);
ret = rte_eth_linkstatus_set(dev, &link);
if (ret == -1)
PMD_DRV_LOG(ERR, "No change in link status\n");
@@ -1208,24 +1208,24 @@ axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->max_rx_pktlen = AXGBE_RX_MAX_BUF_SIZE;
dev_info->max_mac_addrs = pdata->hw_feat.addn_mac + 1;
dev_info->max_hash_mac_addrs = pdata->hw_feat.hash_table_size;
- dev_info->speed_capa = ETH_LINK_SPEED_10G;
+ dev_info->speed_capa = RTE_ETH_LINK_SPEED_10G;
dev_info->rx_offload_capa =
- DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_VLAN_FILTER |
- DEV_RX_OFFLOAD_VLAN_EXTEND |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_SCATTER |
- DEV_RX_OFFLOAD_KEEP_CRC;
+ RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+ RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
+ RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_SCATTER |
+ RTE_ETH_RX_OFFLOAD_KEEP_CRC;
dev_info->tx_offload_capa =
- DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_QINQ_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM;
+ RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+ RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
if (pdata->hw_feat.rss) {
dev_info->flow_type_rss_offloads = AXGBE_RSS_OFFLOAD;
@@ -1262,13 +1262,13 @@ axgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
fc.autoneg = pdata->pause_autoneg;
if (pdata->rx_pause && pdata->tx_pause)
- fc.mode = RTE_FC_FULL;
+ fc.mode = RTE_ETH_FC_FULL;
else if (pdata->rx_pause)
- fc.mode = RTE_FC_RX_PAUSE;
+ fc.mode = RTE_ETH_FC_RX_PAUSE;
else if (pdata->tx_pause)
- fc.mode = RTE_FC_TX_PAUSE;
+ fc.mode = RTE_ETH_FC_TX_PAUSE;
else
- fc.mode = RTE_FC_NONE;
+ fc.mode = RTE_ETH_FC_NONE;
fc_conf->high_water = (1024 + (fc.low_water[0] << 9)) / 1024;
fc_conf->low_water = (1024 + (fc.high_water[0] << 9)) / 1024;
@@ -1298,13 +1298,13 @@ axgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
AXGMAC_IOWRITE(pdata, reg, reg_val);
fc.mode = fc_conf->mode;
- if (fc.mode == RTE_FC_FULL) {
+ if (fc.mode == RTE_ETH_FC_FULL) {
pdata->tx_pause = 1;
pdata->rx_pause = 1;
- } else if (fc.mode == RTE_FC_RX_PAUSE) {
+ } else if (fc.mode == RTE_ETH_FC_RX_PAUSE) {
pdata->tx_pause = 0;
pdata->rx_pause = 1;
- } else if (fc.mode == RTE_FC_TX_PAUSE) {
+ } else if (fc.mode == RTE_ETH_FC_TX_PAUSE) {
pdata->tx_pause = 1;
pdata->rx_pause = 0;
} else {
@@ -1386,15 +1386,15 @@ axgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
fc.mode = pfc_conf->fc.mode;
- if (fc.mode == RTE_FC_FULL) {
+ if (fc.mode == RTE_ETH_FC_FULL) {
pdata->tx_pause = 1;
pdata->rx_pause = 1;
AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 1);
- } else if (fc.mode == RTE_FC_RX_PAUSE) {
+ } else if (fc.mode == RTE_ETH_FC_RX_PAUSE) {
pdata->tx_pause = 0;
pdata->rx_pause = 1;
AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 1);
- } else if (fc.mode == RTE_FC_TX_PAUSE) {
+ } else if (fc.mode == RTE_ETH_FC_TX_PAUSE) {
pdata->tx_pause = 1;
pdata->rx_pause = 0;
AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0);
@@ -1830,8 +1830,8 @@ axgbe_vlan_tpid_set(struct rte_eth_dev *dev,
PMD_DRV_LOG(DEBUG, "EDVLP: qinq = 0x%x\n", qinq);
switch (vlan_type) {
- case ETH_VLAN_TYPE_INNER:
- PMD_DRV_LOG(DEBUG, "ETH_VLAN_TYPE_INNER\n");
+ case RTE_ETH_VLAN_TYPE_INNER:
+ PMD_DRV_LOG(DEBUG, "RTE_ETH_VLAN_TYPE_INNER\n");
if (qinq) {
if (tpid != 0x8100 && tpid != 0x88a8)
PMD_DRV_LOG(ERR,
@@ -1848,8 +1848,8 @@ axgbe_vlan_tpid_set(struct rte_eth_dev *dev,
"Inner type not supported in single tag\n");
}
break;
- case ETH_VLAN_TYPE_OUTER:
- PMD_DRV_LOG(DEBUG, "ETH_VLAN_TYPE_OUTER\n");
+ case RTE_ETH_VLAN_TYPE_OUTER:
+ PMD_DRV_LOG(DEBUG, "RTE_ETH_VLAN_TYPE_OUTER\n");
if (qinq) {
PMD_DRV_LOG(DEBUG, "double tagging is enabled\n");
/*Enable outer VLAN tag*/
@@ -1866,11 +1866,11 @@ axgbe_vlan_tpid_set(struct rte_eth_dev *dev,
"tag supported 0x8100/0x88A8\n");
}
break;
- case ETH_VLAN_TYPE_MAX:
- PMD_DRV_LOG(ERR, "ETH_VLAN_TYPE_MAX\n");
+ case RTE_ETH_VLAN_TYPE_MAX:
+ PMD_DRV_LOG(ERR, "RTE_ETH_VLAN_TYPE_MAX\n");
break;
- case ETH_VLAN_TYPE_UNKNOWN:
- PMD_DRV_LOG(ERR, "ETH_VLAN_TYPE_UNKNOWN\n");
+ case RTE_ETH_VLAN_TYPE_UNKNOWN:
+ PMD_DRV_LOG(ERR, "RTE_ETH_VLAN_TYPE_UNKNOWN\n");
break;
}
return 0;
@@ -1904,8 +1904,8 @@ axgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
AXGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, CSVL, 0);
AXGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, VLTI, 1);
- if (mask & ETH_VLAN_STRIP_MASK) {
- if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+ if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
PMD_DRV_LOG(DEBUG, "Strip ON for device = %s\n",
pdata->eth_dev->device->name);
pdata->hw_if.enable_rx_vlan_stripping(pdata);
@@ -1915,8 +1915,8 @@ axgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
pdata->hw_if.disable_rx_vlan_stripping(pdata);
}
}
- if (mask & ETH_VLAN_FILTER_MASK) {
- if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
+ if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
PMD_DRV_LOG(DEBUG, "Filter ON for device = %s\n",
pdata->eth_dev->device->name);
pdata->hw_if.enable_rx_vlan_filtering(pdata);
@@ -1926,14 +1926,14 @@ axgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
pdata->hw_if.disable_rx_vlan_filtering(pdata);
}
}
- if (mask & ETH_VLAN_EXTEND_MASK) {
- if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
+ if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND) {
PMD_DRV_LOG(DEBUG, "enabling vlan extended mode\n");
axgbe_vlan_extend_enable(pdata);
/* Set global registers with default ethertype*/
- axgbe_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
+ axgbe_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_OUTER,
RTE_ETHER_TYPE_VLAN);
- axgbe_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER,
+ axgbe_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_INNER,
RTE_ETHER_TYPE_VLAN);
} else {
PMD_DRV_LOG(DEBUG, "disabling vlan extended mode\n");
@@ -97,12 +97,12 @@
/* Receive Side Scaling */
#define AXGBE_RSS_OFFLOAD ( \
- ETH_RSS_IPV4 | \
- ETH_RSS_NONFRAG_IPV4_TCP | \
- ETH_RSS_NONFRAG_IPV4_UDP | \
- ETH_RSS_IPV6 | \
- ETH_RSS_NONFRAG_IPV6_TCP | \
- ETH_RSS_NONFRAG_IPV6_UDP)
+ RTE_ETH_RSS_IPV4 | \
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+ RTE_ETH_RSS_IPV6 | \
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP)
#define AXGBE_RSS_HASH_KEY_SIZE 40
#define AXGBE_RSS_MAX_TABLE_SIZE 256
@@ -597,7 +597,7 @@ static void axgbe_an73_state_machine(struct axgbe_port *pdata)
pdata->an_int = 0;
axgbe_an73_clear_interrupts(pdata);
pdata->eth_dev->data->dev_link.link_status =
- ETH_LINK_DOWN;
+ RTE_ETH_LINK_DOWN;
} else if (pdata->an_state == AXGBE_AN_ERROR) {
PMD_DRV_LOG(ERR, "error during auto-negotiation, state=%u\n",
cur_state);
@@ -75,7 +75,7 @@ int axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
(DMA_CH_INC * rxq->queue_id));
rxq->dma_tail_reg = (volatile uint32_t *)((uint8_t *)rxq->dma_regs +
DMA_CH_RDTR_LO);
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+ if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
rxq->crc_len = RTE_ETHER_CRC_LEN;
else
rxq->crc_len = 0;
@@ -286,7 +286,7 @@ axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
mbuf->vlan_tci =
AXGMAC_GET_BITS_LE(desc->write.desc0,
RX_NORMAL_DESC0, OVT);
- if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
mbuf->ol_flags |= PKT_RX_VLAN_STRIPPED;
else
mbuf->ol_flags &= ~PKT_RX_VLAN_STRIPPED;
@@ -430,7 +430,7 @@ uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue,
mbuf->vlan_tci =
AXGMAC_GET_BITS_LE(desc->write.desc0,
RX_NORMAL_DESC0, OVT);
- if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
mbuf->ol_flags |= PKT_RX_VLAN_STRIPPED;
else
mbuf->ol_flags &= ~PKT_RX_VLAN_STRIPPED;
@@ -94,14 +94,14 @@ bnx2x_link_update(struct rte_eth_dev *dev)
link.link_speed = sc->link_vars.line_speed;
switch (sc->link_vars.duplex) {
case DUPLEX_FULL:
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
break;
case DUPLEX_HALF:
- link.link_duplex = ETH_LINK_HALF_DUPLEX;
+ link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
break;
}
link.link_autoneg = !(dev->data->dev_conf.link_speeds &
- ETH_LINK_SPEED_FIXED);
+ RTE_ETH_LINK_SPEED_FIXED);
link.link_status = sc->link_vars.link_up;
return rte_eth_linkstatus_set(dev, &link);
@@ -408,7 +408,7 @@ bnx2xvf_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_comple
if (sc->old_bulletin.valid_bitmap & (1 << CHANNEL_DOWN)) {
PMD_DRV_LOG(ERR, sc, "PF indicated channel is down."
"VF device is no longer operational");
- dev->data->dev_link.link_status = ETH_LINK_DOWN;
+ dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
}
return ret;
@@ -534,7 +534,7 @@ bnx2x_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->min_rx_bufsize = BNX2X_MIN_RX_BUF_SIZE;
dev_info->max_rx_pktlen = BNX2X_MAX_RX_PKT_LEN;
dev_info->max_mac_addrs = BNX2X_MAX_MAC_ADDRS;
- dev_info->speed_capa = ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G;
+ dev_info->speed_capa = RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_20G;
dev_info->rx_desc_lim.nb_max = MAX_RX_AVAIL;
dev_info->rx_desc_lim.nb_min = MIN_RX_SIZE_NONTPA;
@@ -669,7 +669,7 @@ bnx2x_common_dev_init(struct rte_eth_dev *eth_dev, int is_vf)
bnx2x_load_firmware(sc);
assert(sc->firmware);
- if (eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+ if (eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
sc->udp_rss = 1;
sc->rx_budget = BNX2X_RX_BUDGET;
@@ -569,37 +569,37 @@ struct bnxt_rep_info {
#define BNXT_FW_STATUS_SHUTDOWN 0x100000
#define BNXT_ETH_RSS_SUPPORT ( \
- ETH_RSS_IPV4 | \
- ETH_RSS_NONFRAG_IPV4_TCP | \
- ETH_RSS_NONFRAG_IPV4_UDP | \
- ETH_RSS_IPV6 | \
- ETH_RSS_NONFRAG_IPV6_TCP | \
- ETH_RSS_NONFRAG_IPV6_UDP | \
- ETH_RSS_LEVEL_MASK)
-
-#define BNXT_DEV_TX_OFFLOAD_SUPPORT (DEV_TX_OFFLOAD_IPV4_CKSUM | \
- DEV_TX_OFFLOAD_TCP_CKSUM | \
- DEV_TX_OFFLOAD_UDP_CKSUM | \
- DEV_TX_OFFLOAD_TCP_TSO | \
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
- DEV_TX_OFFLOAD_VXLAN_TNL_TSO | \
- DEV_TX_OFFLOAD_GRE_TNL_TSO | \
- DEV_TX_OFFLOAD_IPIP_TNL_TSO | \
- DEV_TX_OFFLOAD_GENEVE_TNL_TSO | \
- DEV_TX_OFFLOAD_QINQ_INSERT | \
- DEV_TX_OFFLOAD_MULTI_SEGS)
-
-#define BNXT_DEV_RX_OFFLOAD_SUPPORT (DEV_RX_OFFLOAD_VLAN_FILTER | \
- DEV_RX_OFFLOAD_IPV4_CKSUM | \
- DEV_RX_OFFLOAD_UDP_CKSUM | \
- DEV_RX_OFFLOAD_TCP_CKSUM | \
- DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
- DEV_RX_OFFLOAD_OUTER_UDP_CKSUM | \
- DEV_RX_OFFLOAD_KEEP_CRC | \
- DEV_RX_OFFLOAD_VLAN_EXTEND | \
- DEV_RX_OFFLOAD_TCP_LRO | \
- DEV_RX_OFFLOAD_SCATTER | \
- DEV_RX_OFFLOAD_RSS_HASH)
+ RTE_ETH_RSS_IPV4 | \
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+ RTE_ETH_RSS_IPV6 | \
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+ RTE_ETH_RSS_LEVEL_MASK)
+
+#define BNXT_DEV_TX_OFFLOAD_SUPPORT (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_TCP_TSO | \
+ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | \
+ RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | \
+ RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO | \
+ RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | \
+ RTE_ETH_TX_OFFLOAD_QINQ_INSERT | \
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
+
+#define BNXT_DEV_RX_OFFLOAD_SUPPORT (RTE_ETH_RX_OFFLOAD_VLAN_FILTER | \
+ RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | \
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM | \
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM | \
+ RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
+ RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM | \
+ RTE_ETH_RX_OFFLOAD_KEEP_CRC | \
+ RTE_ETH_RX_OFFLOAD_VLAN_EXTEND | \
+ RTE_ETH_RX_OFFLOAD_TCP_LRO | \
+ RTE_ETH_RX_OFFLOAD_SCATTER | \
+ RTE_ETH_RX_OFFLOAD_RSS_HASH)
#define BNXT_HWRM_SHORT_REQ_LEN sizeof(struct hwrm_short_input)
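
These support masks are what the driver reports in dev_info, which is how an application discovers them before configuring the port. A minimal sketch, assuming port_id and an otherwise zeroed rte_eth_conf:

    struct rte_eth_dev_info dev_info;
    struct rte_eth_conf conf = { 0 };
    if (rte_eth_dev_info_get(port_id, &dev_info) == 0 &&
        (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_CHECKSUM) ==
                RTE_ETH_RX_OFFLOAD_CHECKSUM)
        /* RTE_ETH_RX_OFFLOAD_CHECKSUM bundles IPv4, UDP and TCP checksum */
        conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_CHECKSUM;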
@@ -426,7 +426,7 @@ static int bnxt_setup_one_vnic(struct bnxt *bp, uint16_t vnic_id)
goto err_out;
/* Alloc RSS context only if RSS mode is enabled */
- if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) {
+ if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) {
int j, nr_ctxs = bnxt_rss_ctxts(bp);
/* RSS table size in Thor is 512.
@@ -458,7 +458,7 @@ static int bnxt_setup_one_vnic(struct bnxt *bp, uint16_t vnic_id)
* setting is not available at this time, it will not be
* configured correctly in the CFA.
*/
- if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
vnic->vlan_strip = true;
else
vnic->vlan_strip = false;
@@ -493,7 +493,7 @@ static int bnxt_setup_one_vnic(struct bnxt *bp, uint16_t vnic_id)
bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
rc = bnxt_hwrm_vnic_tpa_cfg(bp, vnic,
- (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) ?
+ (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) ?
true : false);
if (rc)
goto err_out;
@@ -923,35 +923,35 @@ uint32_t bnxt_get_speed_capabilities(struct bnxt *bp)
link_speed = bp->link_info->support_pam4_speeds;
if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB)
- speed_capa |= ETH_LINK_SPEED_100M;
+ speed_capa |= RTE_ETH_LINK_SPEED_100M;
if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MBHD)
- speed_capa |= ETH_LINK_SPEED_100M_HD;
+ speed_capa |= RTE_ETH_LINK_SPEED_100M_HD;
if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GB)
- speed_capa |= ETH_LINK_SPEED_1G;
+ speed_capa |= RTE_ETH_LINK_SPEED_1G;
if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB)
- speed_capa |= ETH_LINK_SPEED_2_5G;
+ speed_capa |= RTE_ETH_LINK_SPEED_2_5G;
if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10GB)
- speed_capa |= ETH_LINK_SPEED_10G;
+ speed_capa |= RTE_ETH_LINK_SPEED_10G;
if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB)
- speed_capa |= ETH_LINK_SPEED_20G;
+ speed_capa |= RTE_ETH_LINK_SPEED_20G;
if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB)
- speed_capa |= ETH_LINK_SPEED_25G;
+ speed_capa |= RTE_ETH_LINK_SPEED_25G;
if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_40GB)
- speed_capa |= ETH_LINK_SPEED_40G;
+ speed_capa |= RTE_ETH_LINK_SPEED_40G;
if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB)
- speed_capa |= ETH_LINK_SPEED_50G;
+ speed_capa |= RTE_ETH_LINK_SPEED_50G;
if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB)
- speed_capa |= ETH_LINK_SPEED_100G;
+ speed_capa |= RTE_ETH_LINK_SPEED_100G;
if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_50G)
- speed_capa |= ETH_LINK_SPEED_50G;
+ speed_capa |= RTE_ETH_LINK_SPEED_50G;
if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_100G)
- speed_capa |= ETH_LINK_SPEED_100G;
+ speed_capa |= RTE_ETH_LINK_SPEED_100G;
if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_200G)
- speed_capa |= ETH_LINK_SPEED_200G;
+ speed_capa |= RTE_ETH_LINK_SPEED_200G;
if (bp->link_info->auto_mode ==
HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE)
- speed_capa |= ETH_LINK_SPEED_FIXED;
+ speed_capa |= RTE_ETH_LINK_SPEED_FIXED;
return speed_capa;
}
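
On the application side, the bitmap built here is read back through rte_eth_dev_info_get(). A minimal sketch, assuming port_id and <stdio.h>:

    struct rte_eth_dev_info dev_info;
    if (rte_eth_dev_info_get(port_id, &dev_info) == 0) {
        if (dev_info.speed_capa & RTE_ETH_LINK_SPEED_25G)
            printf("port supports 25G\n");
        if (dev_info.speed_capa & RTE_ETH_LINK_SPEED_FIXED)
            printf("autonegotiation not available\n");
    }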
@@ -995,14 +995,14 @@ static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
- dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
+ dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
if (bp->vnic_cap_flags & BNXT_VNIC_CAP_VLAN_RX_STRIP)
- dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_VLAN_STRIP;
- dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+ dev_info->tx_queue_offload_capa = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT |
dev_info->tx_queue_offload_capa;
if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
- dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_VLAN_INSERT;
+ dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;
dev_info->speed_capa = bnxt_get_speed_capabilities(bp);
@@ -1049,8 +1049,8 @@ static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
*/
/* VMDq resources */
- vpool = 64; /* ETH_64_POOLS */
- vrxq = 128; /* ETH_VMDQ_DCB_NUM_QUEUES */
+ vpool = 64; /* RTE_ETH_64_POOLS */
+ vrxq = 128; /* RTE_ETH_VMDQ_DCB_NUM_QUEUES */
for (i = 0; i < 4; vpool >>= 1, i++) {
if (max_vnics > vpool) {
for (j = 0; j < 5; vrxq >>= 1, j++) {
@@ -1145,15 +1145,15 @@ static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
(uint32_t)(eth_dev->data->nb_rx_queues) > bp->max_ring_grps)
goto resource_error;
- if (!(eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) &&
+ if (!(eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) &&
bp->max_vnics < eth_dev->data->nb_rx_queues)
goto resource_error;
bp->rx_cp_nr_rings = bp->rx_nr_rings;
bp->tx_cp_nr_rings = bp->tx_nr_rings;
- if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
- rx_offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+ if (eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+ rx_offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
eth_dev->data->dev_conf.rxmode.offloads = rx_offloads;
bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu);
@@ -1182,7 +1182,7 @@ void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
PMD_DRV_LOG(INFO, "Port %d Link Up - speed %u Mbps - %s\n",
eth_dev->data->port_id,
(uint32_t)link->link_speed,
- (link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
+ (link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex\n"));
else
PMD_DRV_LOG(INFO, "Port %d Link Down\n",
@@ -1199,10 +1199,10 @@ static int bnxt_scattered_rx(struct rte_eth_dev *eth_dev)
uint16_t buf_size;
int i;
- if (eth_dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER)
+ if (eth_dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
return 1;
- if (eth_dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO)
+ if (eth_dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)
return 1;
for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
@@ -1247,15 +1247,15 @@ bnxt_receive_function(struct rte_eth_dev *eth_dev)
* a limited subset have been enabled.
*/
if (eth_dev->data->dev_conf.rxmode.offloads &
- ~(DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_KEEP_CRC |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
- DEV_RX_OFFLOAD_RSS_HASH |
- DEV_RX_OFFLOAD_VLAN_FILTER))
+ ~(RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+ RTE_ETH_RX_OFFLOAD_KEEP_CRC |
+ RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_RSS_HASH |
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER))
goto use_scalar_rx;
#if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT)
@@ -1307,7 +1307,7 @@ bnxt_transmit_function(struct rte_eth_dev *eth_dev)
* or tx offloads.
*/
if (eth_dev->data->scattered_rx ||
- (offloads & ~DEV_TX_OFFLOAD_MBUF_FAST_FREE) ||
+ (offloads & ~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) ||
BNXT_TRUFLOW_EN(bp))
goto use_scalar_tx;
@@ -1608,10 +1608,10 @@ static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
bnxt_link_update_op(eth_dev, 1);
- if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
- vlan_mask |= ETH_VLAN_FILTER_MASK;
- if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
- vlan_mask |= ETH_VLAN_STRIP_MASK;
+ if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
+ vlan_mask |= RTE_ETH_VLAN_FILTER_MASK;
+ if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+ vlan_mask |= RTE_ETH_VLAN_STRIP_MASK;
rc = bnxt_vlan_offload_set_op(eth_dev, vlan_mask);
if (rc)
goto error;
@@ -1833,8 +1833,8 @@ int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete)
/* Retrieve link info from hardware */
rc = bnxt_get_hwrm_link_config(bp, &new);
if (rc) {
- new.link_speed = ETH_LINK_SPEED_100M;
- new.link_duplex = ETH_LINK_FULL_DUPLEX;
+ new.link_speed = RTE_ETH_LINK_SPEED_100M;
+ new.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
PMD_DRV_LOG(ERR,
"Failed to retrieve link rc = 0x%x!\n", rc);
goto out;
@@ -2028,7 +2028,7 @@ static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
if (!vnic->rss_table)
return -EINVAL;
- if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
+ if (!(dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG))
return -EINVAL;
if (reta_size != tbl_size) {
@@ -2041,8 +2041,8 @@ static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
for (i = 0; i < reta_size; i++) {
struct bnxt_rx_queue *rxq;
- idx = i / RTE_RETA_GROUP_SIZE;
- sft = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ sft = i % RTE_ETH_RETA_GROUP_SIZE;
if (!(reta_conf[idx].mask & (1ULL << sft)))
continue;
@@ -2095,8 +2095,8 @@ static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
}
for (idx = 0, i = 0; i < reta_size; i++) {
- idx = i / RTE_RETA_GROUP_SIZE;
- sft = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ sft = i % RTE_ETH_RETA_GROUP_SIZE;
if (reta_conf[idx].mask & (1ULL << sft)) {
uint16_t qid;
@@ -2134,7 +2134,7 @@ static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
* If RSS enablement were different than dev_configure,
* then return -EINVAL
*/
- if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+ if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
if (!rss_conf->rss_hf)
PMD_DRV_LOG(ERR, "Hash type NONE\n");
} else {
@@ -2152,7 +2152,7 @@ static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
vnic->hash_type = bnxt_rte_to_hwrm_hash_types(rss_conf->rss_hf);
vnic->hash_mode =
bnxt_rte_to_hwrm_hash_level(bp, rss_conf->rss_hf,
- ETH_RSS_LEVEL(rss_conf->rss_hf));
+ RTE_ETH_RSS_LEVEL(rss_conf->rss_hf));
/*
* If hashkey is not specified, use the previously configured
@@ -2197,30 +2197,30 @@ static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev,
hash_types = vnic->hash_type;
rss_conf->rss_hf = 0;
if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4) {
- rss_conf->rss_hf |= ETH_RSS_IPV4;
+ rss_conf->rss_hf |= RTE_ETH_RSS_IPV4;
hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
}
if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4) {
- rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+ rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
hash_types &=
~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
}
if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4) {
- rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+ rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
hash_types &=
~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
}
if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6) {
- rss_conf->rss_hf |= ETH_RSS_IPV6;
+ rss_conf->rss_hf |= RTE_ETH_RSS_IPV6;
hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
}
if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6) {
- rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+ rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
hash_types &=
~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
}
if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6) {
- rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+ rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
hash_types &=
~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
}
@@ -2260,17 +2260,17 @@ static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev,
fc_conf->autoneg = 1;
switch (bp->link_info->pause) {
case 0:
- fc_conf->mode = RTE_FC_NONE;
+ fc_conf->mode = RTE_ETH_FC_NONE;
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX:
- fc_conf->mode = RTE_FC_TX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX:
- fc_conf->mode = RTE_FC_RX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
break;
case (HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX |
HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX):
- fc_conf->mode = RTE_FC_FULL;
+ fc_conf->mode = RTE_ETH_FC_FULL;
break;
}
return 0;
@@ -2293,11 +2293,11 @@ static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
}
switch (fc_conf->mode) {
- case RTE_FC_NONE:
+ case RTE_ETH_FC_NONE:
bp->link_info->auto_pause = 0;
bp->link_info->force_pause = 0;
break;
- case RTE_FC_RX_PAUSE:
+ case RTE_ETH_FC_RX_PAUSE:
if (fc_conf->autoneg) {
bp->link_info->auto_pause =
HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
@@ -2308,7 +2308,7 @@ static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
}
break;
- case RTE_FC_TX_PAUSE:
+ case RTE_ETH_FC_TX_PAUSE:
if (fc_conf->autoneg) {
bp->link_info->auto_pause =
HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX;
@@ -2319,7 +2319,7 @@ static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX;
}
break;
- case RTE_FC_FULL:
+ case RTE_ETH_FC_FULL:
if (fc_conf->autoneg) {
bp->link_info->auto_pause =
HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX |
@@ -2350,7 +2350,7 @@ bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
return rc;
switch (udp_tunnel->prot_type) {
- case RTE_TUNNEL_TYPE_VXLAN:
+ case RTE_ETH_TUNNEL_TYPE_VXLAN:
if (bp->vxlan_port_cnt) {
PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
udp_tunnel->udp_port);
@@ -2364,7 +2364,7 @@ bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
tunnel_type =
HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN;
break;
- case RTE_TUNNEL_TYPE_GENEVE:
+ case RTE_ETH_TUNNEL_TYPE_GENEVE:
if (bp->geneve_port_cnt) {
PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
udp_tunnel->udp_port);
@@ -2413,7 +2413,7 @@ bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
return rc;
switch (udp_tunnel->prot_type) {
- case RTE_TUNNEL_TYPE_VXLAN:
+ case RTE_ETH_TUNNEL_TYPE_VXLAN:
if (!bp->vxlan_port_cnt) {
PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
return -EINVAL;
@@ -2430,7 +2430,7 @@ bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN;
port = bp->vxlan_fw_dst_port_id;
break;
- case RTE_TUNNEL_TYPE_GENEVE:
+ case RTE_ETH_TUNNEL_TYPE_GENEVE:
if (!bp->geneve_port_cnt) {
PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
return -EINVAL;
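
Applications reach these add/del callbacks through the generic UDP tunnel API. A minimal sketch registering the IANA-assigned VXLAN port, assuming port_id:

    struct rte_eth_udp_tunnel tunnel = {
        .udp_port = 4789,                       /* IANA VXLAN port */
        .prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN,
    };
    int ret = rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
    /* later: rte_eth_dev_udp_tunnel_port_delete(port_id, &tunnel); */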
@@ -2608,7 +2608,7 @@ bnxt_config_vlan_hw_filter(struct bnxt *bp, uint64_t rx_offloads)
int rc;
vnic = BNXT_GET_DEFAULT_VNIC(bp);
- if (!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)) {
+ if (!(rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)) {
/* Remove any VLAN filters programmed */
for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++)
bnxt_del_vlan_filter(bp, i);
@@ -2628,7 +2628,7 @@ bnxt_config_vlan_hw_filter(struct bnxt *bp, uint64_t rx_offloads)
bnxt_add_vlan_filter(bp, 0);
}
PMD_DRV_LOG(DEBUG, "VLAN Filtering: %d\n",
- !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER));
+ !!(rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER));
return 0;
}
@@ -2641,7 +2641,7 @@ static int bnxt_free_one_vnic(struct bnxt *bp, uint16_t vnic_id)
/* Destroy vnic filters and vnic */
if (bp->eth_dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_VLAN_FILTER) {
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++)
bnxt_del_vlan_filter(bp, i);
}
@@ -2680,7 +2680,7 @@ bnxt_config_vlan_hw_stripping(struct bnxt *bp, uint64_t rx_offloads)
return rc;
if (bp->eth_dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_VLAN_FILTER) {
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
rc = bnxt_add_vlan_filter(bp, 0);
if (rc)
return rc;
@@ -2698,7 +2698,7 @@ bnxt_config_vlan_hw_stripping(struct bnxt *bp, uint64_t rx_offloads)
return rc;
PMD_DRV_LOG(DEBUG, "VLAN Strip Offload: %d\n",
- !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP));
+ !!(rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP));
return rc;
}
@@ -2718,22 +2718,22 @@ bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask)
if (!dev->data->dev_started)
return 0;
- if (mask & ETH_VLAN_FILTER_MASK) {
+ if (mask & RTE_ETH_VLAN_FILTER_MASK) {
/* Enable or disable VLAN filtering */
rc = bnxt_config_vlan_hw_filter(bp, rx_offloads);
if (rc)
return rc;
}
- if (mask & ETH_VLAN_STRIP_MASK) {
+ if (mask & RTE_ETH_VLAN_STRIP_MASK) {
/* Enable or disable VLAN stripping */
rc = bnxt_config_vlan_hw_stripping(bp, rx_offloads);
if (rc)
return rc;
}
- if (mask & ETH_VLAN_EXTEND_MASK) {
- if (rx_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+ if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+ if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
PMD_DRV_LOG(DEBUG, "Extend VLAN supported\n");
else
PMD_DRV_LOG(INFO, "Extend VLAN unsupported\n");
@@ -2748,10 +2748,10 @@ bnxt_vlan_tpid_set_op(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
{
struct bnxt *bp = dev->data->dev_private;
int qinq = dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_VLAN_EXTEND;
+ RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
- if (vlan_type != ETH_VLAN_TYPE_INNER &&
- vlan_type != ETH_VLAN_TYPE_OUTER) {
+ if (vlan_type != RTE_ETH_VLAN_TYPE_INNER &&
+ vlan_type != RTE_ETH_VLAN_TYPE_OUTER) {
PMD_DRV_LOG(ERR,
"Unsupported vlan type.");
return -EINVAL;
@@ -2763,7 +2763,7 @@ bnxt_vlan_tpid_set_op(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
return -EINVAL;
}
- if (vlan_type == ETH_VLAN_TYPE_OUTER) {
+ if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER) {
switch (tpid) {
case RTE_ETHER_TYPE_QINQ:
bp->outer_tpid_bd =
@@ -2791,7 +2791,7 @@ bnxt_vlan_tpid_set_op(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
}
bp->outer_tpid_bd |= tpid;
PMD_DRV_LOG(INFO, "outer_tpid_bd = %x\n", bp->outer_tpid_bd);
- } else if (vlan_type == ETH_VLAN_TYPE_INNER) {
+ } else if (vlan_type == RTE_ETH_VLAN_TYPE_INNER) {
PMD_DRV_LOG(ERR,
"Can accelerate only outer vlan in QinQ\n");
return -EINVAL;
@@ -2831,7 +2831,7 @@ bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev,
bnxt_del_dflt_mac_filter(bp, vnic);
memcpy(bp->mac_addr, addr, RTE_ETHER_ADDR_LEN);
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
+ if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
/* This filter will allow only untagged packets */
rc = bnxt_add_vlan_filter(bp, 0);
} else {
@@ -6556,4 +6556,4 @@ bool is_bnxt_supported(struct rte_eth_dev *dev)
RTE_LOG_REGISTER_SUFFIX(bnxt_logtype_driver, driver, NOTICE);
RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map);
-RTE_PMD_REGISTER_KMOD_DEP(net_bnxt, "* igb_uio | uio_pci_generic | vfio-pci");
+
@@ -978,7 +978,7 @@ static int bnxt_vnic_prep(struct bnxt *bp, struct bnxt_vnic_info *vnic,
}
}
- if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
vnic->vlan_strip = true;
else
vnic->vlan_strip = false;
@@ -1177,7 +1177,7 @@ bnxt_vnic_rss_cfg_update(struct bnxt *bp,
}
/* If RSS types is 0, use a best effort configuration */
- types = rss->types ? rss->types : ETH_RSS_IPV4;
+ types = rss->types ? rss->types : RTE_ETH_RSS_IPV4;
hash_type = bnxt_rte_to_hwrm_hash_types(types);
@@ -1322,7 +1322,7 @@ bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
rxq = bp->rx_queues[act_q->index];
- if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) && rxq &&
+ if (!(dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) && rxq &&
vnic->fw_vnic_id != INVALID_HW_RING_ID)
goto use_vnic;
@@ -628,7 +628,7 @@ int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
uint16_t j = dst_id - 1;
//TODO: Is there a better way to add VLANs to each VNIC in case of VMDQ
- if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
+ if ((dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG) &&
conf->pool_map[j].pools & (1UL << j)) {
PMD_DRV_LOG(DEBUG,
"Add vlan %u to vmdq pool %u\n",
@@ -2979,12 +2979,12 @@ static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
{
uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
- if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
+ if ((conf_link_speed & RTE_ETH_LINK_SPEED_FIXED) == RTE_ETH_LINK_SPEED_AUTONEG)
return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
switch (conf_link_speed) {
- case ETH_LINK_SPEED_10M_HD:
- case ETH_LINK_SPEED_100M_HD:
+ case RTE_ETH_LINK_SPEED_10M_HD:
+ case RTE_ETH_LINK_SPEED_100M_HD:
/* FALLTHROUGH */
return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
}
@@ -3001,51 +3001,51 @@ static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed,
{
uint16_t eth_link_speed = 0;
- if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
- return ETH_LINK_SPEED_AUTONEG;
+ if (conf_link_speed == RTE_ETH_LINK_SPEED_AUTONEG)
+ return RTE_ETH_LINK_SPEED_AUTONEG;
- switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
- case ETH_LINK_SPEED_100M:
- case ETH_LINK_SPEED_100M_HD:
+ switch (conf_link_speed & ~RTE_ETH_LINK_SPEED_FIXED) {
+ case RTE_ETH_LINK_SPEED_100M:
+ case RTE_ETH_LINK_SPEED_100M_HD:
/* FALLTHROUGH */
eth_link_speed =
HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
break;
- case ETH_LINK_SPEED_1G:
+ case RTE_ETH_LINK_SPEED_1G:
eth_link_speed =
HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
break;
- case ETH_LINK_SPEED_2_5G:
+ case RTE_ETH_LINK_SPEED_2_5G:
eth_link_speed =
HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
break;
- case ETH_LINK_SPEED_10G:
+ case RTE_ETH_LINK_SPEED_10G:
eth_link_speed =
HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
break;
- case ETH_LINK_SPEED_20G:
+ case RTE_ETH_LINK_SPEED_20G:
eth_link_speed =
HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
break;
- case ETH_LINK_SPEED_25G:
+ case RTE_ETH_LINK_SPEED_25G:
eth_link_speed =
HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
break;
- case ETH_LINK_SPEED_40G:
+ case RTE_ETH_LINK_SPEED_40G:
eth_link_speed =
HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
break;
- case ETH_LINK_SPEED_50G:
+ case RTE_ETH_LINK_SPEED_50G:
eth_link_speed = pam4_link ?
HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_50GB :
HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
break;
- case ETH_LINK_SPEED_100G:
+ case RTE_ETH_LINK_SPEED_100G:
eth_link_speed = pam4_link ?
HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_100GB :
HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
break;
- case ETH_LINK_SPEED_200G:
+ case RTE_ETH_LINK_SPEED_200G:
eth_link_speed =
HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_200GB;
break;
@@ -3058,11 +3058,11 @@ static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed,
return eth_link_speed;
}
-#define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
- ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
- ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
- ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | \
- ETH_LINK_SPEED_100G | ETH_LINK_SPEED_200G)
+#define BNXT_SUPPORTED_SPEEDS (RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_100M_HD | \
+ RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_2_5G | \
+ RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_20G | RTE_ETH_LINK_SPEED_25G | \
+ RTE_ETH_LINK_SPEED_40G | RTE_ETH_LINK_SPEED_50G | \
+ RTE_ETH_LINK_SPEED_100G | RTE_ETH_LINK_SPEED_200G)
static int bnxt_validate_link_speed(struct bnxt *bp)
{
@@ -3071,13 +3071,13 @@ static int bnxt_validate_link_speed(struct bnxt *bp)
uint32_t link_speed_capa;
uint32_t one_speed;
- if (link_speed == ETH_LINK_SPEED_AUTONEG)
+ if (link_speed == RTE_ETH_LINK_SPEED_AUTONEG)
return 0;
link_speed_capa = bnxt_get_speed_capabilities(bp);
- if (link_speed & ETH_LINK_SPEED_FIXED) {
- one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
+ if (link_speed & RTE_ETH_LINK_SPEED_FIXED) {
+ one_speed = link_speed & ~RTE_ETH_LINK_SPEED_FIXED;
if (one_speed & (one_speed - 1)) {
PMD_DRV_LOG(ERR,
@@ -3107,71 +3107,71 @@ bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
{
uint16_t ret = 0;
- if (link_speed == ETH_LINK_SPEED_AUTONEG) {
+ if (link_speed == RTE_ETH_LINK_SPEED_AUTONEG) {
if (bp->link_info->support_speeds)
return bp->link_info->support_speeds;
link_speed = BNXT_SUPPORTED_SPEEDS;
}
- if (link_speed & ETH_LINK_SPEED_100M)
+ if (link_speed & RTE_ETH_LINK_SPEED_100M)
ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
- if (link_speed & ETH_LINK_SPEED_100M_HD)
+ if (link_speed & RTE_ETH_LINK_SPEED_100M_HD)
ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
- if (link_speed & ETH_LINK_SPEED_1G)
+ if (link_speed & RTE_ETH_LINK_SPEED_1G)
ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
- if (link_speed & ETH_LINK_SPEED_2_5G)
+ if (link_speed & RTE_ETH_LINK_SPEED_2_5G)
ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
- if (link_speed & ETH_LINK_SPEED_10G)
+ if (link_speed & RTE_ETH_LINK_SPEED_10G)
ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
- if (link_speed & ETH_LINK_SPEED_20G)
+ if (link_speed & RTE_ETH_LINK_SPEED_20G)
ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
- if (link_speed & ETH_LINK_SPEED_25G)
+ if (link_speed & RTE_ETH_LINK_SPEED_25G)
ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
- if (link_speed & ETH_LINK_SPEED_40G)
+ if (link_speed & RTE_ETH_LINK_SPEED_40G)
ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
- if (link_speed & ETH_LINK_SPEED_50G)
+ if (link_speed & RTE_ETH_LINK_SPEED_50G)
ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
- if (link_speed & ETH_LINK_SPEED_100G)
+ if (link_speed & RTE_ETH_LINK_SPEED_100G)
ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB;
- if (link_speed & ETH_LINK_SPEED_200G)
+ if (link_speed & RTE_ETH_LINK_SPEED_200G)
ret |= HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_200GB;
return ret;
}
static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
{
- uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
+ uint32_t eth_link_speed = RTE_ETH_SPEED_NUM_NONE;
switch (hw_link_speed) {
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
- eth_link_speed = ETH_SPEED_NUM_100M;
+ eth_link_speed = RTE_ETH_SPEED_NUM_100M;
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
- eth_link_speed = ETH_SPEED_NUM_1G;
+ eth_link_speed = RTE_ETH_SPEED_NUM_1G;
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
- eth_link_speed = ETH_SPEED_NUM_2_5G;
+ eth_link_speed = RTE_ETH_SPEED_NUM_2_5G;
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
- eth_link_speed = ETH_SPEED_NUM_10G;
+ eth_link_speed = RTE_ETH_SPEED_NUM_10G;
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
- eth_link_speed = ETH_SPEED_NUM_20G;
+ eth_link_speed = RTE_ETH_SPEED_NUM_20G;
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
- eth_link_speed = ETH_SPEED_NUM_25G;
+ eth_link_speed = RTE_ETH_SPEED_NUM_25G;
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
- eth_link_speed = ETH_SPEED_NUM_40G;
+ eth_link_speed = RTE_ETH_SPEED_NUM_40G;
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
- eth_link_speed = ETH_SPEED_NUM_50G;
+ eth_link_speed = RTE_ETH_SPEED_NUM_50G;
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
- eth_link_speed = ETH_SPEED_NUM_100G;
+ eth_link_speed = RTE_ETH_SPEED_NUM_100G;
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_200GB:
- eth_link_speed = ETH_SPEED_NUM_200G;
+ eth_link_speed = RTE_ETH_SPEED_NUM_200G;
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
default:
@@ -3184,16 +3184,16 @@ static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
{
- uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
+ uint16_t eth_link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
switch (hw_link_duplex) {
case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
/* FALLTHROUGH */
- eth_link_duplex = ETH_LINK_FULL_DUPLEX;
+ eth_link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
break;
case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
- eth_link_duplex = ETH_LINK_HALF_DUPLEX;
+ eth_link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
break;
default:
PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n",
@@ -3222,12 +3222,12 @@ int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
link->link_speed =
bnxt_parse_hw_link_speed(link_info->link_speed);
else
- link->link_speed = ETH_SPEED_NUM_NONE;
+ link->link_speed = RTE_ETH_SPEED_NUM_NONE;
link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
link->link_status = link_info->link_up;
link->link_autoneg = link_info->auto_mode ==
HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
- ETH_LINK_FIXED : ETH_LINK_AUTONEG;
+ RTE_ETH_LINK_FIXED : RTE_ETH_LINK_AUTONEG;
exit:
return rc;
}
@@ -3253,7 +3253,7 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
if (BNXT_CHIP_P5(bp) &&
- dev_conf->link_speeds == ETH_LINK_SPEED_40G) {
+ dev_conf->link_speeds == RTE_ETH_LINK_SPEED_40G) {
/* 40G is not supported as part of media auto detect.
* The speed should be forced and autoneg disabled
* to configure 40G speed.
@@ -3344,7 +3344,7 @@ int bnxt_hwrm_func_qcfg(struct bnxt *bp, uint16_t *mtu)
HWRM_CHECK_RESULT();
- bp->vlan = rte_le_to_cpu_16(resp->vlan) & ETH_VLAN_ID_MAX;
+ bp->vlan = rte_le_to_cpu_16(resp->vlan) & RTE_ETH_VLAN_ID_MAX;
svif_info = rte_le_to_cpu_16(resp->svif_info);
if (svif_info & HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_VALID)
@@ -537,7 +537,7 @@ int bnxt_rep_dev_info_get_op(struct rte_eth_dev *eth_dev,
dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
if (parent_bp->flags & BNXT_FLAG_PTP_SUPPORTED)
- dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
+ dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT;
dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;
@@ -187,7 +187,7 @@ int bnxt_alloc_rings(struct bnxt *bp, unsigned int socket_id, uint16_t qidx,
rx_ring_info->rx_ring_struct->ring_size *
AGG_RING_SIZE_FACTOR)) : 0;
- if (rx_ring_info && (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
+ if (rx_ring_info && (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)) {
int tpa_max = BNXT_TPA_MAX_AGGS(bp);
tpa_info_len = tpa_max * sizeof(struct bnxt_tpa_info);
@@ -283,7 +283,7 @@ int bnxt_alloc_rings(struct bnxt *bp, unsigned int socket_id, uint16_t qidx,
ag_bitmap_start, ag_bitmap_len);
/* TPA info */
- if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
+ if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)
rx_ring_info->tpa_info =
((struct bnxt_tpa_info *)
((char *)mz->addr + tpa_info_start));
@@ -52,13 +52,13 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
bp->nr_vnics = 0;
/* Multi-queue mode */
- if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB_RSS) {
+ if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB_RSS) {
/* VMDq ONLY, VMDq+RSS, VMDq+DCB, VMDq+DCB+RSS */
switch (dev_conf->rxmode.mq_mode) {
- case ETH_MQ_RX_VMDQ_RSS:
- case ETH_MQ_RX_VMDQ_ONLY:
- case ETH_MQ_RX_VMDQ_DCB_RSS:
+ case RTE_ETH_MQ_RX_VMDQ_RSS:
+ case RTE_ETH_MQ_RX_VMDQ_ONLY:
+ case RTE_ETH_MQ_RX_VMDQ_DCB_RSS:
/* FALLTHROUGH */
/* ETH_8/64_POOLs */
pools = conf->nb_queue_pools;
@@ -66,14 +66,14 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
max_pools = RTE_MIN(bp->max_vnics,
RTE_MIN(bp->max_l2_ctx,
RTE_MIN(bp->max_rsscos_ctx,
- ETH_64_POOLS)));
+ RTE_ETH_64_POOLS)));
PMD_DRV_LOG(DEBUG,
"pools = %u max_pools = %u\n",
pools, max_pools);
if (pools > max_pools)
pools = max_pools;
break;
- case ETH_MQ_RX_RSS:
+ case RTE_ETH_MQ_RX_RSS:
pools = bp->rx_cosq_cnt ? bp->rx_cosq_cnt : 1;
break;
default:
@@ -111,7 +111,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
ring_idx, rxq, i, vnic);
}
if (i == 0) {
- if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB) {
+ if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB) {
bp->eth_dev->data->promiscuous = 1;
vnic->flags |= BNXT_VNIC_INFO_PROMISC;
}
@@ -121,8 +121,8 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
vnic->end_grp_id = end_grp_id;
if (i) {
- if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB ||
- !(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS))
+ if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB ||
+ !(dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS))
vnic->rss_dflt_cr = true;
goto skip_filter_allocation;
}
@@ -147,14 +147,14 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
bp->rx_num_qs_per_vnic = nb_q_per_grp;
- if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+ if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
struct rte_eth_rss_conf *rss = &dev_conf->rx_adv_conf.rss_conf;
if (bp->flags & BNXT_FLAG_UPDATE_HASH)
bp->flags &= ~BNXT_FLAG_UPDATE_HASH;
for (i = 0; i < bp->nr_vnics; i++) {
- uint32_t lvl = ETH_RSS_LEVEL(rss->rss_hf);
+ uint32_t lvl = RTE_ETH_RSS_LEVEL(rss->rss_hf);
vnic = &bp->vnic_info[i];
vnic->hash_type =
@@ -363,7 +363,7 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
PMD_DRV_LOG(DEBUG, "RX Buf size is %d\n", rxq->rx_buf_size);
rxq->queue_id = queue_idx;
rxq->port_id = eth_dev->data->port_id;
- if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+ if (rx_offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
rxq->crc_len = RTE_ETHER_CRC_LEN;
else
rxq->crc_len = 0;
@@ -478,7 +478,7 @@ int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
}
PMD_DRV_LOG(INFO, "Rx queue started %d\n", rx_queue_id);
- if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+ if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
vnic = rxq->vnic;
if (BNXT_HAS_RING_GRPS(bp)) {
@@ -549,7 +549,7 @@ int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
rxq->rx_started = false;
PMD_DRV_LOG(DEBUG, "Rx queue stopped\n");
- if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+ if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
if (BNXT_HAS_RING_GRPS(bp))
vnic->fw_grp_ids[rx_queue_id] = INVALID_HW_RING_ID;
@@ -566,8 +566,8 @@ bnxt_init_ol_flags_tables(struct bnxt_rx_queue *rxq)
dev_conf = &rxq->bp->eth_dev->data->dev_conf;
offloads = dev_conf->rxmode.offloads;
- outer_cksum_enabled = !!(offloads & (DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_OUTER_UDP_CKSUM));
+ outer_cksum_enabled = !!(offloads & (RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM));
/* Initialize ol_flags table. */
pt = rxr->ol_flags_table;
@@ -416,7 +416,7 @@ bnxt_handle_tx_cp_vec(struct bnxt_tx_queue *txq)
} while (nb_tx_pkts < ring_mask);
if (nb_tx_pkts) {
- if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
bnxt_tx_cmp_vec_fast(txq, nb_tx_pkts);
else
bnxt_tx_cmp_vec(txq, nb_tx_pkts);
@@ -96,7 +96,7 @@ bnxt_rxq_rearm(struct bnxt_rx_queue *rxq, struct bnxt_rx_ring_info *rxr)
}
/*
- * Transmit completion function for use when DEV_TX_OFFLOAD_MBUF_FAST_FREE
+ * Transmit completion function for use when RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE
* is enabled.
*/
static inline void
@@ -352,7 +352,7 @@ bnxt_handle_tx_cp_vec(struct bnxt_tx_queue *txq)
} while (nb_tx_pkts < ring_mask);
if (nb_tx_pkts) {
- if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
bnxt_tx_cmp_vec_fast(txq, nb_tx_pkts);
else
bnxt_tx_cmp_vec(txq, nb_tx_pkts);
@@ -333,7 +333,7 @@ bnxt_handle_tx_cp_vec(struct bnxt_tx_queue *txq)
} while (nb_tx_pkts < ring_mask);
if (nb_tx_pkts) {
- if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
bnxt_tx_cmp_vec_fast(txq, nb_tx_pkts);
else
bnxt_tx_cmp_vec(txq, nb_tx_pkts);
@@ -353,7 +353,7 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
}
/*
- * Transmit completion function for use when DEV_TX_OFFLOAD_MBUF_FAST_FREE
+ * Transmit completion function for use when RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE
* is enabled.
*/
static void bnxt_tx_cmp_fast(struct bnxt_tx_queue *txq, int nr_pkts)
@@ -479,7 +479,7 @@ static int bnxt_handle_tx_cp(struct bnxt_tx_queue *txq)
} while (nb_tx_pkts < ring_mask);
if (nb_tx_pkts) {
- if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
bnxt_tx_cmp_fast(txq, nb_tx_pkts);
else
bnxt_tx_cmp(txq, nb_tx_pkts);
@@ -239,17 +239,17 @@ uint16_t bnxt_rte_to_hwrm_hash_types(uint64_t rte_type)
{
uint16_t hwrm_type = 0;
- if (rte_type & ETH_RSS_IPV4)
+ if (rte_type & RTE_ETH_RSS_IPV4)
hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
- if (rte_type & ETH_RSS_NONFRAG_IPV4_TCP)
+ if (rte_type & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
- if (rte_type & ETH_RSS_NONFRAG_IPV4_UDP)
+ if (rte_type & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
- if (rte_type & ETH_RSS_IPV6)
+ if (rte_type & RTE_ETH_RSS_IPV6)
hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
- if (rte_type & ETH_RSS_NONFRAG_IPV6_TCP)
+ if (rte_type & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
- if (rte_type & ETH_RSS_NONFRAG_IPV6_UDP)
+ if (rte_type & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
return hwrm_type;
@@ -258,11 +258,11 @@ uint16_t bnxt_rte_to_hwrm_hash_types(uint64_t rte_type)
int bnxt_rte_to_hwrm_hash_level(struct bnxt *bp, uint64_t hash_f, uint32_t lvl)
{
uint32_t mode = HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_DEFAULT;
- bool l3 = (hash_f & (ETH_RSS_IPV4 | ETH_RSS_IPV6));
- bool l4 = (hash_f & (ETH_RSS_NONFRAG_IPV4_UDP |
- ETH_RSS_NONFRAG_IPV6_UDP |
- ETH_RSS_NONFRAG_IPV4_TCP |
- ETH_RSS_NONFRAG_IPV6_TCP));
+ bool l3 = (hash_f & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6));
+ bool l4 = (hash_f & (RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP));
bool l3_only = l3 && !l4;
bool l3_and_l4 = l3 && l4;
@@ -307,16 +307,16 @@ uint64_t bnxt_hwrm_to_rte_rss_level(struct bnxt *bp, uint32_t mode)
* return default hash mode.
*/
if (!(bp->vnic_cap_flags & BNXT_VNIC_CAP_OUTER_RSS))
- return ETH_RSS_LEVEL_PMD_DEFAULT;
+ return RTE_ETH_RSS_LEVEL_PMD_DEFAULT;
if (mode == HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_OUTERMOST_2 ||
mode == HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_OUTERMOST_4)
- rss_level |= ETH_RSS_LEVEL_OUTERMOST;
+ rss_level |= RTE_ETH_RSS_LEVEL_OUTERMOST;
else if (mode == HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_INNERMOST_2 ||
mode == HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_INNERMOST_4)
- rss_level |= ETH_RSS_LEVEL_INNERMOST;
+ rss_level |= RTE_ETH_RSS_LEVEL_INNERMOST;
else
- rss_level |= ETH_RSS_LEVEL_PMD_DEFAULT;
+ rss_level |= RTE_ETH_RSS_LEVEL_PMD_DEFAULT;
return rss_level;
}
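bnxt_hwrm_to_rte_rss_level() is the inverse direction: it folds the firmware hash mode back into RTE_ETH_RSS_LEVEL_* bits, degrading to the PMD default whenever the VNIC lacks outer-RSS capability. A hedged sketch of the expected behavior, assuming BNXT_VNIC_CAP_OUTER_RSS is set in bp->vnic_cap_flags:

	uint64_t lvl = bnxt_hwrm_to_rte_rss_level(bp,
		HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_OUTERMOST_2);

	assert(lvl & RTE_ETH_RSS_LEVEL_OUTERMOST); /* outer-header hashing */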
@@ -421,18 +421,18 @@ int rte_pmd_bnxt_set_vf_rxmode(uint16_t port, uint16_t vf,
if (vf >= bp->pdev->max_vfs)
return -EINVAL;
- if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG) {
+ if (rx_mask & RTE_ETH_VMDQ_ACCEPT_UNTAG) {
PMD_DRV_LOG(ERR, "Currently cannot toggle this setting\n");
return -ENOTSUP;
}
/* Is this really the correct mapping? VFd seems to think it is. */
- if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
+ if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_UC)
flag |= BNXT_VNIC_INFO_PROMISC;
- if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
+ if (rx_mask & RTE_ETH_VMDQ_ACCEPT_BROADCAST)
flag |= BNXT_VNIC_INFO_BCAST;
- if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
+ if (rx_mask & RTE_ETH_VMDQ_ACCEPT_MULTICAST)
flag |= BNXT_VNIC_INFO_ALLMULTI | BNXT_VNIC_INFO_MCAST;
if (on)
@@ -167,8 +167,8 @@ struct bond_dev_private {
struct rte_eth_desc_lim tx_desc_lim; /**< Tx descriptor limits */
uint16_t reta_size;
- struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_512 /
- RTE_RETA_GROUP_SIZE];
+ struct rte_eth_rss_reta_entry64 reta_conf[RTE_ETH_RSS_RETA_SIZE_512 /
+ RTE_ETH_RETA_GROUP_SIZE];
uint8_t rss_key[52]; /**< 52-byte hash key buffer. */
uint8_t rss_key_len; /**< hash key length in bytes. */
@@ -770,25 +770,25 @@ link_speed_key(uint16_t speed) {
uint16_t key_speed;
switch (speed) {
- case ETH_SPEED_NUM_NONE:
+ case RTE_ETH_SPEED_NUM_NONE:
key_speed = 0x00;
break;
- case ETH_SPEED_NUM_10M:
+ case RTE_ETH_SPEED_NUM_10M:
key_speed = BOND_LINK_SPEED_KEY_10M;
break;
- case ETH_SPEED_NUM_100M:
+ case RTE_ETH_SPEED_NUM_100M:
key_speed = BOND_LINK_SPEED_KEY_100M;
break;
- case ETH_SPEED_NUM_1G:
+ case RTE_ETH_SPEED_NUM_1G:
key_speed = BOND_LINK_SPEED_KEY_1000M;
break;
- case ETH_SPEED_NUM_10G:
+ case RTE_ETH_SPEED_NUM_10G:
key_speed = BOND_LINK_SPEED_KEY_10G;
break;
- case ETH_SPEED_NUM_20G:
+ case RTE_ETH_SPEED_NUM_20G:
key_speed = BOND_LINK_SPEED_KEY_20G;
break;
- case ETH_SPEED_NUM_40G:
+ case RTE_ETH_SPEED_NUM_40G:
key_speed = BOND_LINK_SPEED_KEY_40G;
break;
default:
@@ -887,7 +887,7 @@ bond_mode_8023ad_periodic_cb(void *arg)
if (ret >= 0 && link_info.link_status != 0) {
key = link_speed_key(link_info.link_speed) << 1;
- if (link_info.link_duplex == ETH_LINK_FULL_DUPLEX)
+ if (link_info.link_duplex == RTE_ETH_LINK_FULL_DUPLEX)
key |= BOND_LINK_FULL_DUPLEX_KEY;
} else {
key = 0;
@@ -204,7 +204,7 @@ slave_vlan_filter_set(uint16_t bonded_port_id, uint16_t slave_port_id)
bonded_eth_dev = &rte_eth_devices[bonded_port_id];
if ((bonded_eth_dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_VLAN_FILTER) == 0)
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER) == 0)
return 0;
internals = bonded_eth_dev->data->dev_private;
@@ -592,7 +592,7 @@ __eth_bond_slave_add_lock_free(uint16_t bonded_port_id, uint16_t slave_port_id)
return -1;
}
- if (link_props.link_status == ETH_LINK_UP) {
+ if (link_props.link_status == RTE_ETH_LINK_UP) {
if (internals->active_slave_count == 0 &&
!internals->user_defined_primary_port)
bond_ethdev_primary_set(internals,
@@ -727,7 +727,7 @@ __eth_bond_slave_remove_lock_free(uint16_t bonded_port_id,
internals->tx_offload_capa = 0;
internals->rx_queue_offload_capa = 0;
internals->tx_queue_offload_capa = 0;
- internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
+ internals->flow_type_rss_offloads = RTE_ETH_RSS_PROTO_MASK;
internals->reta_size = 0;
internals->candidate_max_rx_pktlen = 0;
internals->max_rx_pktlen = 0;
@@ -1369,8 +1369,8 @@ link_properties_set(struct rte_eth_dev *ethdev, struct rte_eth_link *slave_link)
* In any other mode the link properties are set to default
* values of AUTONEG/DUPLEX
*/
- ethdev->data->dev_link.link_autoneg = ETH_LINK_AUTONEG;
- ethdev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ ethdev->data->dev_link.link_autoneg = RTE_ETH_LINK_AUTONEG;
+ ethdev->data->dev_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
}
}
@@ -1700,7 +1700,7 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
slave_eth_dev->data->dev_conf.intr_conf.lsc = 1;
/* If RSS is enabled for bonding, try to enable it for slaves */
- if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+ if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
/* rss_key won't be empty if RSS is configured in bonded dev */
slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len =
internals->rss_key_len;
@@ -1714,12 +1714,12 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
}
if (bonded_eth_dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_VLAN_FILTER)
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
slave_eth_dev->data->dev_conf.rxmode.offloads |=
- DEV_RX_OFFLOAD_VLAN_FILTER;
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
else
slave_eth_dev->data->dev_conf.rxmode.offloads &=
- ~DEV_RX_OFFLOAD_VLAN_FILTER;
+ ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
slave_eth_dev->data->dev_conf.rxmode.mtu =
bonded_eth_dev->data->dev_conf.rxmode.mtu;
@@ -1823,7 +1823,7 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
}
/* If RSS is enabled for bonding, synchronize RETA */
- if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
+ if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) {
int i;
struct bond_dev_private *internals;
@@ -1946,7 +1946,7 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev)
return -1;
}
- eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+ eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
eth_dev->data->dev_started = 1;
internals = eth_dev->data->dev_private;
@@ -2086,7 +2086,7 @@ bond_ethdev_stop(struct rte_eth_dev *eth_dev)
tlb_last_obytets[internals->active_slaves[i]] = 0;
}
- eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+ eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
eth_dev->data->dev_started = 0;
internals->link_status_polling_enabled = 0;
@@ -2416,15 +2416,15 @@ bond_ethdev_link_update(struct rte_eth_dev *ethdev, int wait_to_complete)
bond_ctx = ethdev->data->dev_private;
- ethdev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+ ethdev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
if (ethdev->data->dev_started == 0 ||
bond_ctx->active_slave_count == 0) {
- ethdev->data->dev_link.link_status = ETH_LINK_DOWN;
+ ethdev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
return 0;
}
- ethdev->data->dev_link.link_status = ETH_LINK_UP;
+ ethdev->data->dev_link.link_status = RTE_ETH_LINK_UP;
if (wait_to_complete)
link_update = rte_eth_link_get;
@@ -2449,7 +2449,7 @@ bond_ethdev_link_update(struct rte_eth_dev *ethdev, int wait_to_complete)
&slave_link);
if (ret < 0) {
ethdev->data->dev_link.link_speed =
- ETH_SPEED_NUM_NONE;
+ RTE_ETH_SPEED_NUM_NONE;
RTE_BOND_LOG(ERR,
"Slave (port %u) link get failed: %s",
bond_ctx->active_slaves[idx],
@@ -2491,7 +2491,7 @@ bond_ethdev_link_update(struct rte_eth_dev *ethdev, int wait_to_complete)
* In these modes the maximum theoretical link speed is the sum
* of all the slaves
*/
- ethdev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+ ethdev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
one_link_update_succeeded = false;
for (idx = 0; idx < bond_ctx->active_slave_count; idx++) {
@@ -2865,7 +2865,7 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
goto link_update;
/* check link state properties if bonded link is up*/
- if (bonded_eth_dev->data->dev_link.link_status == ETH_LINK_UP) {
+ if (bonded_eth_dev->data->dev_link.link_status == RTE_ETH_LINK_UP) {
if (link_properties_valid(bonded_eth_dev, &link) != 0)
RTE_BOND_LOG(ERR, "Invalid link properties "
"for slave %d in bonding mode %d",
@@ -2881,7 +2881,7 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
if (internals->active_slave_count < 1) {
/* If first active slave, then change link status */
bonded_eth_dev->data->dev_link.link_status =
- ETH_LINK_UP;
+ RTE_ETH_LINK_UP;
internals->current_primary_port = port_id;
lsc_flag = 1;
@@ -2973,12 +2973,12 @@ bond_ethdev_rss_reta_update(struct rte_eth_dev *dev,
return -EINVAL;
/* Copy RETA table */
- reta_count = (reta_size + RTE_RETA_GROUP_SIZE - 1) /
- RTE_RETA_GROUP_SIZE;
+ reta_count = (reta_size + RTE_ETH_RETA_GROUP_SIZE - 1) /
+ RTE_ETH_RETA_GROUP_SIZE;
for (i = 0; i < reta_count; i++) {
internals->reta_conf[i].mask = reta_conf[i].mask;
- for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+ for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
if ((reta_conf[i].mask >> j) & 0x01)
internals->reta_conf[i].reta[j] = reta_conf[i].reta[j];
}
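RTE_ETH_RETA_GROUP_SIZE is 64 (one mask bit per entry of a struct rte_eth_rss_reta_entry64), so entry i of the indirection table lives in group i / 64 at slot i % 64, guarded by mask bit i % 64 -- exactly the arithmetic the loop above relies on. A small sketch for a single entry (queue number 3 is arbitrary):

	uint16_t i = 70;
	uint16_t grp = i / RTE_ETH_RETA_GROUP_SIZE;	/* group 1 */
	uint16_t bit = i % RTE_ETH_RETA_GROUP_SIZE;	/* slot 6  */

	if ((reta_conf[grp].mask >> bit) & 0x01)
		reta_conf[grp].reta[bit] = 3;	/* steer entry 70 to queue 3 */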
@@ -3011,8 +3011,8 @@ bond_ethdev_rss_reta_query(struct rte_eth_dev *dev,
return -EINVAL;
/* Copy RETA table */
- for (i = 0; i < reta_size / RTE_RETA_GROUP_SIZE; i++)
- for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+ for (i = 0; i < reta_size / RTE_ETH_RETA_GROUP_SIZE; i++)
+ for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
if ((reta_conf[i].mask >> j) & 0x01)
reta_conf[i].reta[j] = internals->reta_conf[i].reta[j];
@@ -3274,7 +3274,7 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode)
internals->max_rx_pktlen = 0;
/* Initially allow to choose any offload type */
- internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
+ internals->flow_type_rss_offloads = RTE_ETH_RSS_PROTO_MASK;
memset(&internals->default_rxconf, 0,
sizeof(internals->default_rxconf));
@@ -3501,7 +3501,7 @@ bond_ethdev_configure(struct rte_eth_dev *dev)
* set key to the value specified in port RSS configuration.
* Fall back to default RSS key if the key is not specified
*/
- if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
+ if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) {
struct rte_eth_rss_conf *rss_conf =
&dev->data->dev_conf.rx_adv_conf.rss_conf;
if (rss_conf->rss_key != NULL) {
@@ -3526,9 +3526,9 @@ bond_ethdev_configure(struct rte_eth_dev *dev)
for (i = 0; i < RTE_DIM(internals->reta_conf); i++) {
internals->reta_conf[i].mask = ~0LL;
- for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+ for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
internals->reta_conf[i].reta[j] =
- (i * RTE_RETA_GROUP_SIZE + j) %
+ (i * RTE_ETH_RETA_GROUP_SIZE + j) %
dev->data->nb_rx_queues;
}
}
@@ -15,28 +15,28 @@ nix_rx_offload_flags(struct rte_eth_dev *eth_dev)
struct rte_eth_rxmode *rxmode = &conf->rxmode;
uint16_t flags = 0;
- if (rxmode->mq_mode == ETH_MQ_RX_RSS &&
- (dev->rx_offloads & DEV_RX_OFFLOAD_RSS_HASH))
+ if (rxmode->mq_mode == RTE_ETH_MQ_RX_RSS &&
+ (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH))
flags |= NIX_RX_OFFLOAD_RSS_F;
if (dev->rx_offloads &
- (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM))
+ (RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM))
flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
if (dev->rx_offloads &
- (DEV_RX_OFFLOAD_IPV4_CKSUM | DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
+ (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM))
flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
- if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+ if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
flags |= NIX_RX_MULTI_SEG_F;
- if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+ if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
flags |= NIX_RX_OFFLOAD_TSTAMP_F;
if (!dev->ptype_disable)
flags |= NIX_RX_OFFLOAD_PTYPE_F;
- if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY)
+ if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
flags |= NIX_RX_OFFLOAD_SECURITY_F;
return flags;
@@ -72,39 +72,39 @@ nix_tx_offload_flags(struct rte_eth_dev *eth_dev)
RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, tx_offload) !=
offsetof(struct rte_mbuf, pool) + 2 * sizeof(void *));
- if (conf & DEV_TX_OFFLOAD_VLAN_INSERT ||
- conf & DEV_TX_OFFLOAD_QINQ_INSERT)
+ if (conf & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
+ conf & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
- if (conf & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
- conf & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
+ if (conf & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
+ conf & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
flags |= NIX_TX_OFFLOAD_OL3_OL4_CSUM_F;
- if (conf & DEV_TX_OFFLOAD_IPV4_CKSUM ||
- conf & DEV_TX_OFFLOAD_TCP_CKSUM ||
- conf & DEV_TX_OFFLOAD_UDP_CKSUM || conf & DEV_TX_OFFLOAD_SCTP_CKSUM)
+ if (conf & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
+ conf & RTE_ETH_TX_OFFLOAD_TCP_CKSUM ||
+ conf & RTE_ETH_TX_OFFLOAD_UDP_CKSUM || conf & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)
flags |= NIX_TX_OFFLOAD_L3_L4_CSUM_F;
- if (!(conf & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
+ if (!(conf & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
flags |= NIX_TX_OFFLOAD_MBUF_NOFF_F;
- if (conf & DEV_TX_OFFLOAD_MULTI_SEGS)
+ if (conf & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
flags |= NIX_TX_MULTI_SEG_F;
/* Enable Inner checksum for TSO */
- if (conf & DEV_TX_OFFLOAD_TCP_TSO)
+ if (conf & RTE_ETH_TX_OFFLOAD_TCP_TSO)
flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_L3_L4_CSUM_F);
/* Enable Inner and Outer checksum for Tunnel TSO */
- if (conf & (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
- DEV_TX_OFFLOAD_GENEVE_TNL_TSO | DEV_TX_OFFLOAD_GRE_TNL_TSO))
+ if (conf & (RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO))
flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |
NIX_TX_OFFLOAD_L3_L4_CSUM_F);
- if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+ if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
flags |= NIX_TX_OFFLOAD_TSTAMP_F;
- if (conf & DEV_TX_OFFLOAD_SECURITY)
+ if (conf & RTE_ETH_TX_OFFLOAD_SECURITY)
flags |= NIX_TX_OFFLOAD_SECURITY_F;
return flags;
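Note the inverted sense of the fast-free bit above: NIX_TX_OFFLOAD_MBUF_NOFF_F ("no fast free") is set precisely when RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE is absent from the Tx offloads. An illustrative invariant, not part of the patch:

	uint64_t conf = dev->tx_offloads;
	uint16_t flags = nix_tx_offload_flags(eth_dev);

	/* fast-free requested  <=>  NOFF flag clear */
	assert(!!(conf & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) ==
	       !(flags & NIX_TX_OFFLOAD_MBUF_NOFF_F));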
@@ -98,7 +98,7 @@ cn10k_rss_action_validate(struct rte_eth_dev *eth_dev,
return -EINVAL;
}
- if (eth_dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
+ if (eth_dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_RSS) {
plt_err("multi-queue mode is disabled");
return -ENOTSUP;
}
@@ -77,12 +77,12 @@ cn10k_eth_set_rx_function(struct rte_eth_dev *eth_dev)
nix_eth_rx_burst_mseg[0][0][0][0][0][0][0];
if (dev->scalar_ena) {
- if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+ if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
return pick_rx_func(eth_dev, nix_eth_rx_burst_mseg);
return pick_rx_func(eth_dev, nix_eth_rx_burst);
}
- if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+ if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
return pick_rx_func(eth_dev, nix_eth_rx_vec_burst_mseg);
return pick_rx_func(eth_dev, nix_eth_rx_vec_burst);
}
@@ -78,11 +78,11 @@ cn10k_eth_set_tx_function(struct rte_eth_dev *eth_dev)
if (dev->scalar_ena) {
pick_tx_func(eth_dev, nix_eth_tx_burst);
- if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+ if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
pick_tx_func(eth_dev, nix_eth_tx_burst_mseg);
} else {
pick_tx_func(eth_dev, nix_eth_tx_vec_burst);
- if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+ if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
pick_tx_func(eth_dev, nix_eth_tx_vec_burst_mseg);
}
@@ -15,28 +15,28 @@ nix_rx_offload_flags(struct rte_eth_dev *eth_dev)
struct rte_eth_rxmode *rxmode = &conf->rxmode;
uint16_t flags = 0;
- if (rxmode->mq_mode == ETH_MQ_RX_RSS &&
- (dev->rx_offloads & DEV_RX_OFFLOAD_RSS_HASH))
+ if (rxmode->mq_mode == RTE_ETH_MQ_RX_RSS &&
+ (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH))
flags |= NIX_RX_OFFLOAD_RSS_F;
if (dev->rx_offloads &
- (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM))
+ (RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM))
flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
if (dev->rx_offloads &
- (DEV_RX_OFFLOAD_IPV4_CKSUM | DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
+ (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM))
flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
- if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+ if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
flags |= NIX_RX_MULTI_SEG_F;
- if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+ if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
flags |= NIX_RX_OFFLOAD_TSTAMP_F;
if (!dev->ptype_disable)
flags |= NIX_RX_OFFLOAD_PTYPE_F;
- if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY)
+ if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
flags |= NIX_RX_OFFLOAD_SECURITY_F;
return flags;
@@ -72,39 +72,39 @@ nix_tx_offload_flags(struct rte_eth_dev *eth_dev)
RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, tx_offload) !=
offsetof(struct rte_mbuf, pool) + 2 * sizeof(void *));
- if (conf & DEV_TX_OFFLOAD_VLAN_INSERT ||
- conf & DEV_TX_OFFLOAD_QINQ_INSERT)
+ if (conf & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
+ conf & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
- if (conf & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
- conf & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
+ if (conf & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
+ conf & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
flags |= NIX_TX_OFFLOAD_OL3_OL4_CSUM_F;
- if (conf & DEV_TX_OFFLOAD_IPV4_CKSUM ||
- conf & DEV_TX_OFFLOAD_TCP_CKSUM ||
- conf & DEV_TX_OFFLOAD_UDP_CKSUM || conf & DEV_TX_OFFLOAD_SCTP_CKSUM)
+ if (conf & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
+ conf & RTE_ETH_TX_OFFLOAD_TCP_CKSUM ||
+ conf & RTE_ETH_TX_OFFLOAD_UDP_CKSUM || conf & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)
flags |= NIX_TX_OFFLOAD_L3_L4_CSUM_F;
- if (!(conf & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
+ if (!(conf & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
flags |= NIX_TX_OFFLOAD_MBUF_NOFF_F;
- if (conf & DEV_TX_OFFLOAD_MULTI_SEGS)
+ if (conf & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
flags |= NIX_TX_MULTI_SEG_F;
/* Enable Inner checksum for TSO */
- if (conf & DEV_TX_OFFLOAD_TCP_TSO)
+ if (conf & RTE_ETH_TX_OFFLOAD_TCP_TSO)
flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_L3_L4_CSUM_F);
/* Enable Inner and Outer checksum for Tunnel TSO */
- if (conf & (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
- DEV_TX_OFFLOAD_GENEVE_TNL_TSO | DEV_TX_OFFLOAD_GRE_TNL_TSO))
+ if (conf & (RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO))
flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |
NIX_TX_OFFLOAD_L3_L4_CSUM_F);
- if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+ if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
flags |= NIX_TX_OFFLOAD_TSTAMP_F;
- if (dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY)
+ if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
flags |= NIX_TX_OFFLOAD_SECURITY_F;
return flags;
@@ -298,9 +298,9 @@ cn9k_nix_configure(struct rte_eth_dev *eth_dev)
/* Platform specific checks */
if ((roc_model_is_cn96_a0() || roc_model_is_cn95_a0()) &&
- (txmode->offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) &&
- ((txmode->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
- (txmode->offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM))) {
+ (txmode->offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM) &&
+ ((txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
+ (txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM))) {
plt_err("Outer IP and SCTP checksum unsupported");
return -EINVAL;
}
@@ -553,17 +553,17 @@ cn9k_nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
* TSO not supported for earlier chip revisions
*/
if (roc_model_is_cn96_a0() || roc_model_is_cn95_a0())
- dev->tx_offload_capa &= ~(DEV_TX_OFFLOAD_TCP_TSO |
- DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
- DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
- DEV_TX_OFFLOAD_GRE_TNL_TSO);
+ dev->tx_offload_capa &= ~(RTE_ETH_TX_OFFLOAD_TCP_TSO |
+ RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO);
/* 50G and 100G are supported only on board version C0
* and above of CN9K.
*/
if (roc_model_is_cn96_a0() || roc_model_is_cn95_a0()) {
- dev->speed_capa &= ~(uint64_t)ETH_LINK_SPEED_50G;
- dev->speed_capa &= ~(uint64_t)ETH_LINK_SPEED_100G;
+ dev->speed_capa &= ~(uint64_t)RTE_ETH_LINK_SPEED_50G;
+ dev->speed_capa &= ~(uint64_t)RTE_ETH_LINK_SPEED_100G;
}
dev->hwcap = 0;
@@ -77,12 +77,12 @@ cn9k_eth_set_rx_function(struct rte_eth_dev *eth_dev)
nix_eth_rx_burst_mseg[0][0][0][0][0][0][0];
if (dev->scalar_ena) {
- if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+ if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
return pick_rx_func(eth_dev, nix_eth_rx_burst_mseg);
return pick_rx_func(eth_dev, nix_eth_rx_burst);
}
- if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+ if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
return pick_rx_func(eth_dev, nix_eth_rx_vec_burst_mseg);
return pick_rx_func(eth_dev, nix_eth_rx_vec_burst);
}
@@ -77,11 +77,11 @@ cn9k_eth_set_tx_function(struct rte_eth_dev *eth_dev)
if (dev->scalar_ena) {
pick_tx_func(eth_dev, nix_eth_tx_burst);
- if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+ if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
pick_tx_func(eth_dev, nix_eth_tx_burst_mseg);
} else {
pick_tx_func(eth_dev, nix_eth_tx_vec_burst);
- if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+ if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
pick_tx_func(eth_dev, nix_eth_tx_vec_burst_mseg);
}
@@ -10,7 +10,7 @@ nix_get_rx_offload_capa(struct cnxk_eth_dev *dev)
if (roc_nix_is_vf_or_sdp(&dev->nix) ||
dev->npc.switch_header_type == ROC_PRIV_FLAGS_HIGIG)
- capa &= ~DEV_RX_OFFLOAD_TIMESTAMP;
+ capa &= ~RTE_ETH_RX_OFFLOAD_TIMESTAMP;
return capa;
}
@@ -28,11 +28,11 @@ nix_get_speed_capa(struct cnxk_eth_dev *dev)
uint32_t speed_capa;
/* Auto negotiation disabled */
- speed_capa = ETH_LINK_SPEED_FIXED;
+ speed_capa = RTE_ETH_LINK_SPEED_FIXED;
if (!roc_nix_is_vf_or_sdp(&dev->nix) && !roc_nix_is_lbk(&dev->nix)) {
- speed_capa |= ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
- ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G |
- ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G;
+ speed_capa |= RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G |
+ RTE_ETH_LINK_SPEED_25G | RTE_ETH_LINK_SPEED_40G |
+ RTE_ETH_LINK_SPEED_50G | RTE_ETH_LINK_SPEED_100G;
}
return speed_capa;
@@ -65,7 +65,7 @@ nix_security_setup(struct cnxk_eth_dev *dev)
struct roc_nix *nix = &dev->nix;
int i, rc = 0;
- if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+ if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
/* Setup Inline Inbound */
rc = roc_nix_inl_inb_init(nix);
if (rc) {
@@ -80,8 +80,8 @@ nix_security_setup(struct cnxk_eth_dev *dev)
cnxk_nix_inb_mode_set(dev, true);
}
- if (dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY ||
- dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+ if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY ||
+ dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
struct plt_bitmap *bmap;
size_t bmap_sz;
void *mem;
@@ -100,8 +100,8 @@ nix_security_setup(struct cnxk_eth_dev *dev)
dev->outb.lf_base = roc_nix_inl_outb_lf_base_get(nix);
- /* Skip the rest if DEV_TX_OFFLOAD_SECURITY is not enabled */
- if (!(dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY))
+ /* Skip the rest if RTE_ETH_TX_OFFLOAD_SECURITY is not enabled */
+ if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY))
goto done;
rc = -ENOMEM;
@@ -136,7 +136,7 @@ nix_security_setup(struct cnxk_eth_dev *dev)
done:
return 0;
cleanup:
- if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY)
+ if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
rc |= roc_nix_inl_inb_fini(nix);
return rc;
}
@@ -182,7 +182,7 @@ nix_security_release(struct cnxk_eth_dev *dev)
int rc, ret = 0;
/* Cleanup Inline inbound */
- if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+ if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
/* Destroy inbound sessions */
tvar = NULL;
RTE_TAILQ_FOREACH_SAFE(eth_sec, &dev->inb.list, entry, tvar)
@@ -199,8 +199,8 @@ nix_security_release(struct cnxk_eth_dev *dev)
}
/* Cleanup Inline outbound */
- if (dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY ||
- dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+ if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY ||
+ dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
/* Destroy outbound sessions */
tvar = NULL;
RTE_TAILQ_FOREACH_SAFE(eth_sec, &dev->outb.list, entry, tvar)
@@ -242,8 +242,8 @@ nix_enable_mseg_on_jumbo(struct cnxk_eth_rxq_sp *rxq)
buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
if (eth_dev->data->mtu + (uint32_t)CNXK_NIX_L2_OVERHEAD > buffsz) {
- dev->rx_offloads |= DEV_RX_OFFLOAD_SCATTER;
- dev->tx_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+ dev->rx_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
+ dev->tx_offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
}
}
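nix_enable_mseg_on_jumbo() compares the post-headroom buffer size against the frame size the MTU implies; once a frame cannot fit a single mbuf, Rx scatter and Tx multi-seg are forced on together. With hypothetical numbers (2 KiB data room, the standard 128-byte RTE_PKTMBUF_HEADROOM):

	uint32_t buffsz = 2048 - RTE_PKTMBUF_HEADROOM;	/* 1920 bytes  */
	uint32_t mtu = 9000;				/* jumbo frame */

	if (mtu + CNXK_NIX_L2_OVERHEAD > buffsz) {	/* true here */
		dev->rx_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
		dev->tx_offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
	}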
@@ -273,7 +273,7 @@ nix_init_flow_ctrl_config(struct rte_eth_dev *eth_dev)
struct rte_eth_fc_conf fc_conf = {0};
int rc;
- /* Both Rx & Tx flow ctrl get enabled(RTE_FC_FULL) in HW
+ /* Both Rx & Tx flow ctrl get enabled (RTE_ETH_FC_FULL) in HW
* by AF driver; update that info in the PMD structure.
*/
rc = cnxk_nix_flow_ctrl_get(eth_dev, &fc_conf);
@@ -281,10 +281,10 @@ nix_init_flow_ctrl_config(struct rte_eth_dev *eth_dev)
goto exit;
fc->mode = fc_conf.mode;
- fc->rx_pause = (fc_conf.mode == RTE_FC_FULL) ||
- (fc_conf.mode == RTE_FC_RX_PAUSE);
- fc->tx_pause = (fc_conf.mode == RTE_FC_FULL) ||
- (fc_conf.mode == RTE_FC_TX_PAUSE);
+ fc->rx_pause = (fc_conf.mode == RTE_ETH_FC_FULL) ||
+ (fc_conf.mode == RTE_ETH_FC_RX_PAUSE);
+ fc->tx_pause = (fc_conf.mode == RTE_ETH_FC_FULL) ||
+ (fc_conf.mode == RTE_ETH_FC_TX_PAUSE);
exit:
return rc;
@@ -305,11 +305,11 @@ nix_update_flow_ctrl_config(struct rte_eth_dev *eth_dev)
/* To avoid Link credit deadlock on Ax, disable Tx FC if it's enabled */
if (roc_model_is_cn96_ax() &&
dev->npc.switch_header_type != ROC_PRIV_FLAGS_HIGIG &&
- (fc_cfg.mode == RTE_FC_FULL || fc_cfg.mode == RTE_FC_RX_PAUSE)) {
+ (fc_cfg.mode == RTE_ETH_FC_FULL || fc_cfg.mode == RTE_ETH_FC_RX_PAUSE)) {
fc_cfg.mode =
- (fc_cfg.mode == RTE_FC_FULL ||
- fc_cfg.mode == RTE_FC_TX_PAUSE) ?
- RTE_FC_TX_PAUSE : RTE_FC_NONE;
+ (fc_cfg.mode == RTE_ETH_FC_FULL ||
+ fc_cfg.mode == RTE_ETH_FC_TX_PAUSE) ?
+ RTE_ETH_FC_TX_PAUSE : RTE_ETH_FC_NONE;
}
return cnxk_nix_flow_ctrl_set(eth_dev, &fc_cfg);
@@ -352,7 +352,7 @@ nix_sq_max_sqe_sz(struct cnxk_eth_dev *dev)
* Maximum three segments can be supported with W8; choose
* NIX_MAXSQESZ_W16 for multi-segment offload.
*/
- if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+ if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
return NIX_MAXSQESZ_W16;
else
return NIX_MAXSQESZ_W8;
@@ -380,7 +380,7 @@ cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
/* When Tx Security offload is enabled, increase tx desc count by
* max possible outbound desc count.
*/
- if (dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY)
+ if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
nb_desc += dev->outb.nb_desc;
/* Setup ROC SQ */
@@ -499,7 +499,7 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
* to avoid meta packet drop as LBK does not currently support
* backpressure.
*/
- if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY && roc_nix_is_lbk(nix)) {
+ if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY && roc_nix_is_lbk(nix)) {
uint64_t pkt_pool_limit = roc_nix_inl_dev_rq_limit_get();
/* Use current RQ's aura limit if inl rq is not available */
@@ -561,7 +561,7 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
rxq_sp->qconf.nb_desc = nb_desc;
rxq_sp->qconf.mp = mp;
- if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+ if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
/* Setup rq reference for inline dev if present */
rc = roc_nix_inl_dev_rq_get(rq);
if (rc)
@@ -579,7 +579,7 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
* These are needed in deriving raw clock value from tsc counter.
* read_clock eth op returns raw clock value.
*/
- if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en) {
+ if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en) {
rc = cnxk_nix_tsc_convert(dev);
if (rc) {
plt_err("Failed to calculate delta and freq mult");
@@ -618,7 +618,7 @@ cnxk_nix_rx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
plt_nix_dbg("Releasing rxq %u", qid);
/* Release rq reference for inline dev if present */
- if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY)
+ if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
roc_nix_inl_dev_rq_put(rq);
/* Cleanup ROC RQ */
@@ -657,24 +657,24 @@ cnxk_rss_ethdev_to_nix(struct cnxk_eth_dev *dev, uint64_t ethdev_rss,
dev->ethdev_rss_hf = ethdev_rss;
- if (ethdev_rss & ETH_RSS_L2_PAYLOAD &&
+ if (ethdev_rss & RTE_ETH_RSS_L2_PAYLOAD &&
dev->npc.switch_header_type == ROC_PRIV_FLAGS_LEN_90B) {
flowkey_cfg |= FLOW_KEY_TYPE_CH_LEN_90B;
}
- if (ethdev_rss & ETH_RSS_C_VLAN)
+ if (ethdev_rss & RTE_ETH_RSS_C_VLAN)
flowkey_cfg |= FLOW_KEY_TYPE_VLAN;
- if (ethdev_rss & ETH_RSS_L3_SRC_ONLY)
+ if (ethdev_rss & RTE_ETH_RSS_L3_SRC_ONLY)
flowkey_cfg |= FLOW_KEY_TYPE_L3_SRC;
- if (ethdev_rss & ETH_RSS_L3_DST_ONLY)
+ if (ethdev_rss & RTE_ETH_RSS_L3_DST_ONLY)
flowkey_cfg |= FLOW_KEY_TYPE_L3_DST;
- if (ethdev_rss & ETH_RSS_L4_SRC_ONLY)
+ if (ethdev_rss & RTE_ETH_RSS_L4_SRC_ONLY)
flowkey_cfg |= FLOW_KEY_TYPE_L4_SRC;
- if (ethdev_rss & ETH_RSS_L4_DST_ONLY)
+ if (ethdev_rss & RTE_ETH_RSS_L4_DST_ONLY)
flowkey_cfg |= FLOW_KEY_TYPE_L4_DST;
if (ethdev_rss & RSS_IPV4_ENABLE)
@@ -683,34 +683,34 @@ cnxk_rss_ethdev_to_nix(struct cnxk_eth_dev *dev, uint64_t ethdev_rss,
if (ethdev_rss & RSS_IPV6_ENABLE)
flowkey_cfg |= flow_key_type[rss_level][RSS_IPV6_INDEX];
- if (ethdev_rss & ETH_RSS_TCP)
+ if (ethdev_rss & RTE_ETH_RSS_TCP)
flowkey_cfg |= flow_key_type[rss_level][RSS_TCP_INDEX];
- if (ethdev_rss & ETH_RSS_UDP)
+ if (ethdev_rss & RTE_ETH_RSS_UDP)
flowkey_cfg |= flow_key_type[rss_level][RSS_UDP_INDEX];
- if (ethdev_rss & ETH_RSS_SCTP)
+ if (ethdev_rss & RTE_ETH_RSS_SCTP)
flowkey_cfg |= flow_key_type[rss_level][RSS_SCTP_INDEX];
- if (ethdev_rss & ETH_RSS_L2_PAYLOAD)
+ if (ethdev_rss & RTE_ETH_RSS_L2_PAYLOAD)
flowkey_cfg |= flow_key_type[rss_level][RSS_DMAC_INDEX];
if (ethdev_rss & RSS_IPV6_EX_ENABLE)
flowkey_cfg |= FLOW_KEY_TYPE_IPV6_EXT;
- if (ethdev_rss & ETH_RSS_PORT)
+ if (ethdev_rss & RTE_ETH_RSS_PORT)
flowkey_cfg |= FLOW_KEY_TYPE_PORT;
- if (ethdev_rss & ETH_RSS_NVGRE)
+ if (ethdev_rss & RTE_ETH_RSS_NVGRE)
flowkey_cfg |= FLOW_KEY_TYPE_NVGRE;
- if (ethdev_rss & ETH_RSS_VXLAN)
+ if (ethdev_rss & RTE_ETH_RSS_VXLAN)
flowkey_cfg |= FLOW_KEY_TYPE_VXLAN;
- if (ethdev_rss & ETH_RSS_GENEVE)
+ if (ethdev_rss & RTE_ETH_RSS_GENEVE)
flowkey_cfg |= FLOW_KEY_TYPE_GENEVE;
- if (ethdev_rss & ETH_RSS_GTPU)
+ if (ethdev_rss & RTE_ETH_RSS_GTPU)
flowkey_cfg |= FLOW_KEY_TYPE_GTPU;
return flowkey_cfg;
@@ -746,7 +746,7 @@ nix_rss_default_setup(struct cnxk_eth_dev *dev)
uint64_t rss_hf;
rss_hf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
- rss_hash_level = ETH_RSS_LEVEL(rss_hf);
+ rss_hash_level = RTE_ETH_RSS_LEVEL(rss_hf);
if (rss_hash_level)
rss_hash_level -= 1;
@@ -958,8 +958,8 @@ nix_lso_fmt_setup(struct cnxk_eth_dev *dev)
/* Nothing much to do if offload is not enabled */
if (!(dev->tx_offloads &
- (DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
- DEV_TX_OFFLOAD_GENEVE_TNL_TSO | DEV_TX_OFFLOAD_GRE_TNL_TSO)))
+ (RTE_ETH_TX_OFFLOAD_TCP_TSO | RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO)))
return 0;
/* Setup LSO formats in AF. It's a no-op if other ethdev has
@@ -1007,13 +1007,13 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
goto fail_configure;
}
- if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
- rxmode->mq_mode != ETH_MQ_RX_RSS) {
+ if (rxmode->mq_mode != RTE_ETH_MQ_RX_NONE &&
+ rxmode->mq_mode != RTE_ETH_MQ_RX_RSS) {
plt_err("Unsupported mq rx mode %d", rxmode->mq_mode);
goto fail_configure;
}
- if (txmode->mq_mode != ETH_MQ_TX_NONE) {
+ if (txmode->mq_mode != RTE_ETH_MQ_TX_NONE) {
plt_err("Unsupported mq tx mode %d", txmode->mq_mode);
goto fail_configure;
}
@@ -1054,7 +1054,7 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
/* Prepare rx cfg */
rx_cfg = ROC_NIX_LF_RX_CFG_DIS_APAD;
if (dev->rx_offloads &
- (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM)) {
+ (RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM)) {
rx_cfg |= ROC_NIX_LF_RX_CFG_CSUM_OL4;
rx_cfg |= ROC_NIX_LF_RX_CFG_CSUM_IL4;
}
@@ -1062,7 +1062,7 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
ROC_NIX_LF_RX_CFG_LEN_IL4 | ROC_NIX_LF_RX_CFG_LEN_IL3 |
ROC_NIX_LF_RX_CFG_LEN_OL4 | ROC_NIX_LF_RX_CFG_LEN_OL3);
- if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+ if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
rx_cfg |= ROC_NIX_LF_RX_CFG_IP6_UDP_OPT;
/* Disable drop re if rx offload security is enabled and
* platform does not support it.
@@ -1454,12 +1454,12 @@ cnxk_nix_dev_start(struct rte_eth_dev *eth_dev)
* enabled on PF owning this VF
*/
memset(&dev->tstamp, 0, sizeof(struct cnxk_timesync_info));
- if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en)
+ if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en)
cnxk_eth_dev_ops.timesync_enable(eth_dev);
else
cnxk_eth_dev_ops.timesync_disable(eth_dev);
- if (dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
+ if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
rc = rte_mbuf_dyn_rx_timestamp_register
(&dev->tstamp.tstamp_dynfield_offset,
&dev->tstamp.rx_tstamp_dynflag);
@@ -58,41 +58,44 @@
CNXK_NIX_TX_NB_SEG_MAX)
#define CNXK_NIX_RSS_L3_L4_SRC_DST \
- (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_SRC_ONLY | \
- ETH_RSS_L4_DST_ONLY)
+ (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY | \
+ RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY)
#define CNXK_NIX_RSS_OFFLOAD \
- (ETH_RSS_PORT | ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP | \
- ETH_RSS_SCTP | ETH_RSS_TUNNEL | ETH_RSS_L2_PAYLOAD | \
- CNXK_NIX_RSS_L3_L4_SRC_DST | ETH_RSS_LEVEL_MASK | ETH_RSS_C_VLAN)
+ (RTE_ETH_RSS_PORT | RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP | \
+ RTE_ETH_RSS_TCP | RTE_ETH_RSS_SCTP | RTE_ETH_RSS_TUNNEL | \
+ RTE_ETH_RSS_L2_PAYLOAD | CNXK_NIX_RSS_L3_L4_SRC_DST | \
+ RTE_ETH_RSS_LEVEL_MASK | RTE_ETH_RSS_C_VLAN)
#define CNXK_NIX_TX_OFFLOAD_CAPA \
- (DEV_TX_OFFLOAD_MBUF_FAST_FREE | DEV_TX_OFFLOAD_MT_LOCKFREE | \
- DEV_TX_OFFLOAD_VLAN_INSERT | DEV_TX_OFFLOAD_QINQ_INSERT | \
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | DEV_TX_OFFLOAD_OUTER_UDP_CKSUM | \
- DEV_TX_OFFLOAD_TCP_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM | \
- DEV_TX_OFFLOAD_SCTP_CKSUM | DEV_TX_OFFLOAD_TCP_TSO | \
- DEV_TX_OFFLOAD_VXLAN_TNL_TSO | DEV_TX_OFFLOAD_GENEVE_TNL_TSO | \
- DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_MULTI_SEGS | \
- DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_SECURITY)
+ (RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE | RTE_ETH_TX_OFFLOAD_MT_LOCKFREE | \
+ RTE_ETH_TX_OFFLOAD_VLAN_INSERT | RTE_ETH_TX_OFFLOAD_QINQ_INSERT | \
+ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM | RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | RTE_ETH_TX_OFFLOAD_TCP_TSO | \
+ RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | \
+ RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | RTE_ETH_TX_OFFLOAD_MULTI_SEGS | \
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | RTE_ETH_TX_OFFLOAD_SECURITY)
#define CNXK_NIX_RX_OFFLOAD_CAPA \
- (DEV_RX_OFFLOAD_CHECKSUM | DEV_RX_OFFLOAD_SCTP_CKSUM | \
- DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | DEV_RX_OFFLOAD_SCATTER | \
- DEV_RX_OFFLOAD_OUTER_UDP_CKSUM | DEV_RX_OFFLOAD_RSS_HASH | \
- DEV_RX_OFFLOAD_TIMESTAMP | DEV_RX_OFFLOAD_VLAN_STRIP | \
- DEV_RX_OFFLOAD_SECURITY)
+ (RTE_ETH_RX_OFFLOAD_CHECKSUM | RTE_ETH_RX_OFFLOAD_SCTP_CKSUM | \
+ RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM | RTE_ETH_RX_OFFLOAD_SCATTER | \
+ RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_RSS_HASH | \
+ RTE_ETH_RX_OFFLOAD_TIMESTAMP | RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
+ RTE_ETH_RX_OFFLOAD_SECURITY)
#define RSS_IPV4_ENABLE \
- (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_UDP | \
- ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_SCTP)
+ (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | \
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_SCTP)
#define RSS_IPV6_ENABLE \
- (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_UDP | \
- ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_NONFRAG_IPV6_SCTP)
+ (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | \
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
#define RSS_IPV6_EX_ENABLE \
- (ETH_RSS_IPV6_EX | ETH_RSS_IPV6_TCP_EX | ETH_RSS_IPV6_UDP_EX)
+ (RTE_ETH_RSS_IPV6_EX | RTE_ETH_RSS_IPV6_TCP_EX | RTE_ETH_RSS_IPV6_UDP_EX)
#define RSS_MAX_LEVELS 3
@@ -104,11 +104,11 @@ parse_reta_size(const char *key, const char *value, void *extra_args)
val = atoi(value);
- if (val <= ETH_RSS_RETA_SIZE_64)
+ if (val <= RTE_ETH_RSS_RETA_SIZE_64)
val = ROC_NIX_RSS_RETA_SZ_64;
- else if (val > ETH_RSS_RETA_SIZE_64 && val <= ETH_RSS_RETA_SIZE_128)
+ else if (val > RTE_ETH_RSS_RETA_SIZE_64 && val <= RTE_ETH_RSS_RETA_SIZE_128)
val = ROC_NIX_RSS_RETA_SZ_128;
- else if (val > ETH_RSS_RETA_SIZE_128 && val <= ETH_RSS_RETA_SIZE_256)
+ else if (val > RTE_ETH_RSS_RETA_SIZE_128 && val <= RTE_ETH_RSS_RETA_SIZE_256)
val = ROC_NIX_RSS_RETA_SZ_256;
else
val = ROC_NIX_RSS_RETA_SZ_64;
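parse_reta_size() buckets the devarg into the nearest supported table size, with out-of-range values falling back to 64 entries. Unrolled, the renamed comparison ladder behaves like this sketch (val = 100 is a hypothetical devarg):

	int val = 100;
	if (val <= RTE_ETH_RSS_RETA_SIZE_64)		/* 1..64    */
		val = ROC_NIX_RSS_RETA_SZ_64;
	else if (val <= RTE_ETH_RSS_RETA_SIZE_128)	/* 65..128, taken */
		val = ROC_NIX_RSS_RETA_SZ_128;
	else if (val <= RTE_ETH_RSS_RETA_SIZE_256)	/* 129..256 */
		val = ROC_NIX_RSS_RETA_SZ_256;
	else
		val = ROC_NIX_RSS_RETA_SZ_64;		/* fallback */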
@@ -81,24 +81,24 @@ cnxk_nix_rx_burst_mode_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
uint64_t flags;
const char *output;
} rx_offload_map[] = {
- {DEV_RX_OFFLOAD_VLAN_STRIP, " VLAN Strip,"},
- {DEV_RX_OFFLOAD_IPV4_CKSUM, " Inner IPv4 Checksum,"},
- {DEV_RX_OFFLOAD_UDP_CKSUM, " UDP Checksum,"},
- {DEV_RX_OFFLOAD_TCP_CKSUM, " TCP Checksum,"},
- {DEV_RX_OFFLOAD_TCP_LRO, " TCP LRO,"},
- {DEV_RX_OFFLOAD_QINQ_STRIP, " QinQ VLAN Strip,"},
- {DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPv4 Checksum,"},
- {DEV_RX_OFFLOAD_MACSEC_STRIP, " MACsec Strip,"},
- {DEV_RX_OFFLOAD_HEADER_SPLIT, " Header Split,"},
- {DEV_RX_OFFLOAD_VLAN_FILTER, " VLAN Filter,"},
- {DEV_RX_OFFLOAD_VLAN_EXTEND, " VLAN Extend,"},
- {DEV_RX_OFFLOAD_SCATTER, " Scattered,"},
- {DEV_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
- {DEV_RX_OFFLOAD_SECURITY, " Security,"},
- {DEV_RX_OFFLOAD_KEEP_CRC, " Keep CRC,"},
- {DEV_RX_OFFLOAD_SCTP_CKSUM, " SCTP,"},
- {DEV_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP Checksum,"},
- {DEV_RX_OFFLOAD_RSS_HASH, " RSS,"}
+ {RTE_ETH_RX_OFFLOAD_VLAN_STRIP, " VLAN Strip,"},
+ {RTE_ETH_RX_OFFLOAD_IPV4_CKSUM, " Inner IPv4 Checksum,"},
+ {RTE_ETH_RX_OFFLOAD_UDP_CKSUM, " UDP Checksum,"},
+ {RTE_ETH_RX_OFFLOAD_TCP_CKSUM, " TCP Checksum,"},
+ {RTE_ETH_RX_OFFLOAD_TCP_LRO, " TCP LRO,"},
+ {RTE_ETH_RX_OFFLOAD_QINQ_STRIP, " QinQ VLAN Strip,"},
+ {RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPv4 Checksum,"},
+ {RTE_ETH_RX_OFFLOAD_MACSEC_STRIP, " MACsec Strip,"},
+ {RTE_ETH_RX_OFFLOAD_HEADER_SPLIT, " Header Split,"},
+ {RTE_ETH_RX_OFFLOAD_VLAN_FILTER, " VLAN Filter,"},
+ {RTE_ETH_RX_OFFLOAD_VLAN_EXTEND, " VLAN Extend,"},
+ {RTE_ETH_RX_OFFLOAD_SCATTER, " Scattered,"},
+ {RTE_ETH_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
+ {RTE_ETH_RX_OFFLOAD_SECURITY, " Security,"},
+ {RTE_ETH_RX_OFFLOAD_KEEP_CRC, " Keep CRC,"},
+ {RTE_ETH_RX_OFFLOAD_SCTP_CKSUM, " SCTP,"},
+ {RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP Checksum,"},
+ {RTE_ETH_RX_OFFLOAD_RSS_HASH, " RSS,"}
};
static const char *const burst_mode[] = {"Vector Neon, Rx Offloads:",
"Scalar, Rx Offloads:"
@@ -142,28 +142,28 @@ cnxk_nix_tx_burst_mode_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
uint64_t flags;
const char *output;
} tx_offload_map[] = {
- {DEV_TX_OFFLOAD_VLAN_INSERT, " VLAN Insert,"},
- {DEV_TX_OFFLOAD_IPV4_CKSUM, " Inner IPv4 Checksum,"},
- {DEV_TX_OFFLOAD_UDP_CKSUM, " UDP Checksum,"},
- {DEV_TX_OFFLOAD_TCP_CKSUM, " TCP Checksum,"},
- {DEV_TX_OFFLOAD_SCTP_CKSUM, " SCTP Checksum,"},
- {DEV_TX_OFFLOAD_TCP_TSO, " TCP TSO,"},
- {DEV_TX_OFFLOAD_UDP_TSO, " UDP TSO,"},
- {DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPv4 Checksum,"},
- {DEV_TX_OFFLOAD_QINQ_INSERT, " QinQ VLAN Insert,"},
- {DEV_TX_OFFLOAD_VXLAN_TNL_TSO, " VXLAN Tunnel TSO,"},
- {DEV_TX_OFFLOAD_GRE_TNL_TSO, " GRE Tunnel TSO,"},
- {DEV_TX_OFFLOAD_IPIP_TNL_TSO, " IP-in-IP Tunnel TSO,"},
- {DEV_TX_OFFLOAD_GENEVE_TNL_TSO, " Geneve Tunnel TSO,"},
- {DEV_TX_OFFLOAD_MACSEC_INSERT, " MACsec Insert,"},
- {DEV_TX_OFFLOAD_MT_LOCKFREE, " Multi Thread Lockless Tx,"},
- {DEV_TX_OFFLOAD_MULTI_SEGS, " Scattered,"},
- {DEV_TX_OFFLOAD_MBUF_FAST_FREE, " H/W MBUF Free,"},
- {DEV_TX_OFFLOAD_SECURITY, " Security,"},
- {DEV_TX_OFFLOAD_UDP_TNL_TSO, " UDP Tunnel TSO,"},
- {DEV_TX_OFFLOAD_IP_TNL_TSO, " IP Tunnel TSO,"},
- {DEV_TX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP Checksum,"},
- {DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP, " Timestamp,"}
+ {RTE_ETH_TX_OFFLOAD_VLAN_INSERT, " VLAN Insert,"},
+ {RTE_ETH_TX_OFFLOAD_IPV4_CKSUM, " Inner IPv4 Checksum,"},
+ {RTE_ETH_TX_OFFLOAD_UDP_CKSUM, " UDP Checksum,"},
+ {RTE_ETH_TX_OFFLOAD_TCP_CKSUM, " TCP Checksum,"},
+ {RTE_ETH_TX_OFFLOAD_SCTP_CKSUM, " SCTP Checksum,"},
+ {RTE_ETH_TX_OFFLOAD_TCP_TSO, " TCP TSO,"},
+ {RTE_ETH_TX_OFFLOAD_UDP_TSO, " UDP TSO,"},
+ {RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPv4 Checksum,"},
+ {RTE_ETH_TX_OFFLOAD_QINQ_INSERT, " QinQ VLAN Insert,"},
+ {RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO, " VXLAN Tunnel TSO,"},
+ {RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO, " GRE Tunnel TSO,"},
+ {RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO, " IP-in-IP Tunnel TSO,"},
+ {RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO, " Geneve Tunnel TSO,"},
+ {RTE_ETH_TX_OFFLOAD_MACSEC_INSERT, " MACsec Insert,"},
+ {RTE_ETH_TX_OFFLOAD_MT_LOCKFREE, " Multi Thread Lockless Tx,"},
+ {RTE_ETH_TX_OFFLOAD_MULTI_SEGS, " Scattered,"},
+ {RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE, " H/W MBUF Free,"},
+ {RTE_ETH_TX_OFFLOAD_SECURITY, " Security,"},
+ {RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO, " UDP Tunnel TSO,"},
+ {RTE_ETH_TX_OFFLOAD_IP_TNL_TSO, " IP Tunnel TSO,"},
+ {RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP Checksum,"},
+ {RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP, " Timestamp,"}
};
static const char *const burst_mode[] = {"Vector Neon, Tx Offloads:",
"Scalar, Tx Offloads:"
@@ -203,8 +203,8 @@ cnxk_nix_flow_ctrl_get(struct rte_eth_dev *eth_dev,
{
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
enum rte_eth_fc_mode mode_map[] = {
- RTE_FC_NONE, RTE_FC_RX_PAUSE,
- RTE_FC_TX_PAUSE, RTE_FC_FULL
+ RTE_ETH_FC_NONE, RTE_ETH_FC_RX_PAUSE,
+ RTE_ETH_FC_TX_PAUSE, RTE_ETH_FC_FULL
};
struct roc_nix *nix = &dev->nix;
int mode;
@@ -264,10 +264,10 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
if (fc_conf->mode == fc->mode)
return 0;
- rx_pause = (fc_conf->mode == RTE_FC_FULL) ||
- (fc_conf->mode == RTE_FC_RX_PAUSE);
- tx_pause = (fc_conf->mode == RTE_FC_FULL) ||
- (fc_conf->mode == RTE_FC_TX_PAUSE);
+ rx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) ||
+ (fc_conf->mode == RTE_ETH_FC_RX_PAUSE);
+ tx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) ||
+ (fc_conf->mode == RTE_ETH_FC_TX_PAUSE);
/* Check if TX pause frame is already enabled or not */
if (fc->tx_pause ^ tx_pause) {
@@ -408,13 +408,13 @@ cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
* when this feature has not been enabled before.
*/
if (data->dev_started && frame_size > buffsz &&
- !(dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)) {
+ !(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)) {
plt_err("Scatter offload is not enabled for mtu");
goto exit;
}
/* Check <seg size> * <max_seg> >= max_frame */
- if ((dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER) &&
+ if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER) &&
frame_size > (buffsz * CNXK_NIX_RX_NB_SEG_MAX)) {
plt_err("Greater than maximum supported packet length");
goto exit;
@@ -734,8 +734,8 @@ cnxk_nix_reta_update(struct rte_eth_dev *eth_dev,
}
/* Copy RETA table */
- for (i = 0; i < (int)(dev->nix.reta_sz / RTE_RETA_GROUP_SIZE); i++) {
- for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) {
+ for (i = 0; i < (int)(dev->nix.reta_sz / RTE_ETH_RETA_GROUP_SIZE); i++) {
+ for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++) {
if ((reta_conf[i].mask >> j) & 0x01)
reta[idx] = reta_conf[i].reta[j];
idx++;
@@ -770,8 +770,8 @@ cnxk_nix_reta_query(struct rte_eth_dev *eth_dev,
goto fail;
/* Copy RETA table */
- for (i = 0; i < (int)(dev->nix.reta_sz / RTE_RETA_GROUP_SIZE); i++) {
- for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) {
+ for (i = 0; i < (int)(dev->nix.reta_sz / RTE_ETH_RETA_GROUP_SIZE); i++) {
+ for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++) {
if ((reta_conf[i].mask >> j) & 0x01)
reta_conf[i].reta[j] = reta[idx];
idx++;
@@ -804,7 +804,7 @@ cnxk_nix_rss_hash_update(struct rte_eth_dev *eth_dev,
if (rss_conf->rss_key)
roc_nix_rss_key_set(nix, rss_conf->rss_key);
- rss_hash_level = ETH_RSS_LEVEL(rss_conf->rss_hf);
+ rss_hash_level = RTE_ETH_RSS_LEVEL(rss_conf->rss_hf);
if (rss_hash_level)
rss_hash_level -= 1;
flowkey_cfg =
@@ -38,7 +38,7 @@ nix_link_status_print(struct rte_eth_dev *eth_dev, struct rte_eth_link *link)
plt_info("Port %d: Link Up - speed %u Mbps - %s",
(int)(eth_dev->data->port_id),
(uint32_t)link->link_speed,
- link->link_duplex == ETH_LINK_FULL_DUPLEX
+ link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX
? "full-duplex"
: "half-duplex");
else
@@ -89,7 +89,7 @@ cnxk_eth_dev_link_status_cb(struct roc_nix *nix, struct roc_nix_link_info *link)
eth_link.link_status = link->status;
eth_link.link_speed = link->speed;
- eth_link.link_autoneg = ETH_LINK_AUTONEG;
+ eth_link.link_autoneg = RTE_ETH_LINK_AUTONEG;
eth_link.link_duplex = link->full_duplex;
/* Print link info */
@@ -117,17 +117,17 @@ cnxk_nix_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete)
return 0;
if (roc_nix_is_lbk(&dev->nix)) {
- link.link_status = ETH_LINK_UP;
- link.link_speed = ETH_SPEED_NUM_100G;
- link.link_autoneg = ETH_LINK_FIXED;
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_status = RTE_ETH_LINK_UP;
+ link.link_speed = RTE_ETH_SPEED_NUM_100G;
+ link.link_autoneg = RTE_ETH_LINK_FIXED;
+ link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
} else {
rc = roc_nix_mac_link_info_get(&dev->nix, &info);
if (rc)
return rc;
link.link_status = info.status;
link.link_speed = info.speed;
- link.link_autoneg = ETH_LINK_AUTONEG;
+ link.link_autoneg = RTE_ETH_LINK_AUTONEG;
if (info.full_duplex)
link.link_duplex = info.full_duplex;
}
@@ -227,7 +227,7 @@ cnxk_nix_timesync_enable(struct rte_eth_dev *eth_dev)
dev->rx_tstamp_tc.cc_mask = CNXK_CYCLECOUNTER_MASK;
dev->tx_tstamp_tc.cc_mask = CNXK_CYCLECOUNTER_MASK;
- dev->rx_offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
+ dev->rx_offloads |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
rc = roc_nix_ptp_rx_ena_dis(nix, true);
if (!rc) {
@@ -257,7 +257,7 @@ int
cnxk_nix_timesync_disable(struct rte_eth_dev *eth_dev)
{
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
- uint64_t rx_offloads = DEV_RX_OFFLOAD_TIMESTAMP;
+ uint64_t rx_offloads = RTE_ETH_RX_OFFLOAD_TIMESTAMP;
struct roc_nix *nix = &dev->nix;
int rc = 0;
@@ -69,7 +69,7 @@ npc_rss_action_validate(struct rte_eth_dev *eth_dev,
return -EINVAL;
}
- if (eth_dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
+ if (eth_dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_RSS) {
plt_err("multi-queue mode is disabled");
return -ENOTSUP;
}
@@ -28,31 +28,31 @@
#define CXGBE_LINK_STATUS_POLL_CNT 100 /* Max number of times to poll */
#define CXGBE_DEFAULT_RSS_KEY_LEN 40 /* 320-bits */
-#define CXGBE_RSS_HF_IPV4_MASK (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | \
- ETH_RSS_NONFRAG_IPV4_OTHER)
-#define CXGBE_RSS_HF_IPV6_MASK (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | \
- ETH_RSS_NONFRAG_IPV6_OTHER | \
- ETH_RSS_IPV6_EX)
-#define CXGBE_RSS_HF_TCP_IPV6_MASK (ETH_RSS_NONFRAG_IPV6_TCP | \
- ETH_RSS_IPV6_TCP_EX)
-#define CXGBE_RSS_HF_UDP_IPV6_MASK (ETH_RSS_NONFRAG_IPV6_UDP | \
- ETH_RSS_IPV6_UDP_EX)
-#define CXGBE_RSS_HF_ALL (ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP)
+#define CXGBE_RSS_HF_IPV4_MASK (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | \
+ RTE_ETH_RSS_NONFRAG_IPV4_OTHER)
+#define CXGBE_RSS_HF_IPV6_MASK (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | \
+ RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
+ RTE_ETH_RSS_IPV6_EX)
+#define CXGBE_RSS_HF_TCP_IPV6_MASK (RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+ RTE_ETH_RSS_IPV6_TCP_EX)
+#define CXGBE_RSS_HF_UDP_IPV6_MASK (RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+ RTE_ETH_RSS_IPV6_UDP_EX)
+#define CXGBE_RSS_HF_ALL (RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP | RTE_ETH_RSS_UDP)
/* Tx/Rx Offloads supported */
-#define CXGBE_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT | \
- DEV_TX_OFFLOAD_IPV4_CKSUM | \
- DEV_TX_OFFLOAD_UDP_CKSUM | \
- DEV_TX_OFFLOAD_TCP_CKSUM | \
- DEV_TX_OFFLOAD_TCP_TSO | \
- DEV_TX_OFFLOAD_MULTI_SEGS)
-
-#define CXGBE_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP | \
- DEV_RX_OFFLOAD_IPV4_CKSUM | \
- DEV_RX_OFFLOAD_UDP_CKSUM | \
- DEV_RX_OFFLOAD_TCP_CKSUM | \
- DEV_RX_OFFLOAD_SCATTER | \
- DEV_RX_OFFLOAD_RSS_HASH)
+#define CXGBE_TX_OFFLOADS (RTE_ETH_TX_OFFLOAD_VLAN_INSERT | \
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_TCP_TSO | \
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
+
+#define CXGBE_RX_OFFLOADS (RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
+ RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | \
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM | \
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM | \
+ RTE_ETH_RX_OFFLOAD_SCATTER | \
+ RTE_ETH_RX_OFFLOAD_RSS_HASH)
/* Devargs filtermode and filtermask representation */
enum cxgbe_devargs_filter_mode_flags {
@@ -231,9 +231,9 @@ int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev,
}
new_link.link_status = cxgbe_force_linkup(adapter) ?
- ETH_LINK_UP : pi->link_cfg.link_ok;
+ RTE_ETH_LINK_UP : pi->link_cfg.link_ok;
new_link.link_autoneg = (lc->link_caps & FW_PORT_CAP32_ANEG) ? 1 : 0;
- new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ new_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
new_link.link_speed = t4_fwcap_to_speed(lc->link_caps);
return rte_eth_linkstatus_set(eth_dev, &new_link);
@@ -374,7 +374,7 @@ int cxgbe_dev_start(struct rte_eth_dev *eth_dev)
goto out;
}
- if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
+ if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
eth_dev->data->scattered_rx = 1;
else
eth_dev->data->scattered_rx = 0;
@@ -438,9 +438,9 @@ int cxgbe_dev_configure(struct rte_eth_dev *eth_dev)
CXGBE_FUNC_TRACE();
- if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
+ if (eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
eth_dev->data->dev_conf.rxmode.offloads |=
- DEV_RX_OFFLOAD_RSS_HASH;
+ RTE_ETH_RX_OFFLOAD_RSS_HASH;
if (!(adapter->flags & FW_QUEUE_BOUND)) {
err = cxgbe_setup_sge_fwevtq(adapter);
@@ -1080,13 +1080,13 @@ static int cxgbe_flow_ctrl_get(struct rte_eth_dev *eth_dev,
rx_pause = 1;
if (rx_pause && tx_pause)
- fc_conf->mode = RTE_FC_FULL;
+ fc_conf->mode = RTE_ETH_FC_FULL;
else if (rx_pause)
- fc_conf->mode = RTE_FC_RX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
else if (tx_pause)
- fc_conf->mode = RTE_FC_TX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
else
- fc_conf->mode = RTE_FC_NONE;
+ fc_conf->mode = RTE_ETH_FC_NONE;
return 0;
}
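The four RTE_ETH_FC_* modes are just the cross product of the two pause directions, so the ladder above is equivalent to the lookup-table form the cnxk driver uses earlier in this series:

	static const enum rte_eth_fc_mode mode_map[] = {
		RTE_ETH_FC_NONE, RTE_ETH_FC_RX_PAUSE,
		RTE_ETH_FC_TX_PAUSE, RTE_ETH_FC_FULL,
	};

	/* index by (tx_pause << 1) | rx_pause */
	fc_conf->mode = mode_map[(tx_pause << 1) | rx_pause];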
@@ -1099,12 +1099,12 @@ static int cxgbe_flow_ctrl_set(struct rte_eth_dev *eth_dev,
u8 tx_pause = 0, rx_pause = 0;
int ret;
- if (fc_conf->mode == RTE_FC_FULL) {
+ if (fc_conf->mode == RTE_ETH_FC_FULL) {
tx_pause = 1;
rx_pause = 1;
- } else if (fc_conf->mode == RTE_FC_TX_PAUSE) {
+ } else if (fc_conf->mode == RTE_ETH_FC_TX_PAUSE) {
tx_pause = 1;
- } else if (fc_conf->mode == RTE_FC_RX_PAUSE) {
+ } else if (fc_conf->mode == RTE_ETH_FC_RX_PAUSE) {
rx_pause = 1;
}
@@ -1200,9 +1200,9 @@ static int cxgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
rss_hf |= CXGBE_RSS_HF_IPV6_MASK;
if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) {
- rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+ rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
if (flags & F_FW_RSS_VI_CONFIG_CMD_UDPEN)
- rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+ rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
}
if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
@@ -1246,8 +1246,8 @@ static int cxgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
rte_memcpy(rss, pi->rss, pi->rss_size * sizeof(u16));
for (i = 0; i < reta_size; i++) {
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
if (!(reta_conf[idx].mask & (1ULL << shift)))
continue;
@@ -1277,8 +1277,8 @@ static int cxgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
return -EINVAL;
for (i = 0; i < reta_size; i++) {
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
if (!(reta_conf[idx].mask & (1ULL << shift)))
continue;
@@ -1479,7 +1479,7 @@ static int cxgbe_fec_get_capa_speed_to_fec(struct link_config *lc,
if (lc->pcaps & FW_PORT_CAP32_SPEED_100G) {
if (capa_arr) {
- capa_arr[num].speed = ETH_SPEED_NUM_100G;
+ capa_arr[num].speed = RTE_ETH_SPEED_NUM_100G;
capa_arr[num].capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
RTE_ETH_FEC_MODE_CAPA_MASK(RS);
}
@@ -1488,7 +1488,7 @@ static int cxgbe_fec_get_capa_speed_to_fec(struct link_config *lc,
if (lc->pcaps & FW_PORT_CAP32_SPEED_50G) {
if (capa_arr) {
- capa_arr[num].speed = ETH_SPEED_NUM_50G;
+ capa_arr[num].speed = RTE_ETH_SPEED_NUM_50G;
capa_arr[num].capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
RTE_ETH_FEC_MODE_CAPA_MASK(BASER);
}
@@ -1497,7 +1497,7 @@ static int cxgbe_fec_get_capa_speed_to_fec(struct link_config *lc,
if (lc->pcaps & FW_PORT_CAP32_SPEED_25G) {
if (capa_arr) {
- capa_arr[num].speed = ETH_SPEED_NUM_25G;
+ capa_arr[num].speed = RTE_ETH_SPEED_NUM_25G;
capa_arr[num].capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
RTE_ETH_FEC_MODE_CAPA_MASK(RS);
@@ -1670,7 +1670,7 @@ int cxgbe_link_start(struct port_info *pi)
* that step explicitly.
*/
ret = t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu, -1, -1, -1,
- !!(conf_offloads & DEV_RX_OFFLOAD_VLAN_STRIP),
+ !!(conf_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP),
true);
if (ret == 0) {
ret = cxgbe_mpstcam_modify(pi, (int)pi->xact_addr_filt,
@@ -1694,7 +1694,7 @@ int cxgbe_link_start(struct port_info *pi)
}
if (ret == 0 && cxgbe_force_linkup(adapter))
- pi->eth_dev->data->dev_link.link_status = ETH_LINK_UP;
+ pi->eth_dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
return ret;
}
@@ -1725,10 +1725,10 @@ int cxgbe_write_rss_conf(const struct port_info *pi, uint64_t rss_hf)
if (rss_hf & CXGBE_RSS_HF_IPV4_MASK)
flags |= F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN;
- if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
flags |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
- if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
flags |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
F_FW_RSS_VI_CONFIG_CMD_UDPEN;
@@ -1865,7 +1865,7 @@ static void fw_caps_to_speed_caps(enum fw_port_type port_type,
{
#define SET_SPEED(__speed_name) \
do { \
- *speed_caps |= ETH_LINK_ ## __speed_name; \
+ *speed_caps |= RTE_ETH_LINK_ ## __speed_name; \
} while (0)
#define FW_CAPS_TO_SPEED(__fw_name) \
@@ -1952,7 +1952,7 @@ void cxgbe_get_speed_caps(struct port_info *pi, u32 *speed_caps)
speed_caps);
if (!(pi->link_cfg.pcaps & FW_PORT_CAP32_ANEG))
- *speed_caps |= ETH_LINK_SPEED_FIXED;
+ *speed_caps |= RTE_ETH_LINK_SPEED_FIXED;
}
/**
@@ -54,29 +54,29 @@
/* Supported Rx offloads */
static uint64_t dev_rx_offloads_sup =
- DEV_RX_OFFLOAD_SCATTER;
+ RTE_ETH_RX_OFFLOAD_SCATTER;
/* Rx offloads which cannot be disabled */
static uint64_t dev_rx_offloads_nodis =
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_RSS_HASH;
+ RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_RSS_HASH;
/* Supported Tx offloads */
static uint64_t dev_tx_offloads_sup =
- DEV_TX_OFFLOAD_MT_LOCKFREE |
- DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ RTE_ETH_TX_OFFLOAD_MT_LOCKFREE |
+ RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
/* Tx offloads which cannot be disabled */
static uint64_t dev_tx_offloads_nodis =
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_SCTP_CKSUM |
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_TX_OFFLOAD_MULTI_SEGS;
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
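These paired *_sup/*_nodis sets feed dev_info: the advertised capability is their union, and the nodis bits are applied whether or not the application requested them. A minimal sketch of that composition, assuming the statics above (both helper names are hypothetical):

/* Illustrative only: how the two sets are typically folded together. */
static uint64_t
dpaa_rx_offload_capa(void)
{
	/* everything advertised to the application via dev_info */
	return dev_rx_offloads_sup | dev_rx_offloads_nodis;
}

static uint64_t
dpaa_rx_offloads_effective(uint64_t requested)
{
	/* non-disableable bits apply even if the app did not ask for them */
	return requested | dev_rx_offloads_nodis;
}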
/* Keep track of whether QMAN and BMAN have been globally initialized */
static int is_global_init;
@@ -238,7 +238,7 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
fman_if_set_maxfrm(dev->process_private, max_rx_pktlen);
- if (rx_offloads & DEV_RX_OFFLOAD_SCATTER) {
+ if (rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
DPAA_PMD_DEBUG("enabling scatter mode");
fman_if_set_sg(dev->process_private, 1);
dev->data->scattered_rx = 1;
@@ -283,43 +283,43 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
/* Configure link only if link is UP*/
if (link->link_status) {
- if (eth_conf->link_speeds == ETH_LINK_SPEED_AUTONEG) {
+ if (eth_conf->link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
/* Start autoneg only if link is not in autoneg mode */
if (!link->link_autoneg)
dpaa_restart_link_autoneg(__fif->node_name);
- } else if (eth_conf->link_speeds & ETH_LINK_SPEED_FIXED) {
- switch (eth_conf->link_speeds & ~ETH_LINK_SPEED_FIXED) {
- case ETH_LINK_SPEED_10M_HD:
- speed = ETH_SPEED_NUM_10M;
- duplex = ETH_LINK_HALF_DUPLEX;
+ } else if (eth_conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
+ switch (eth_conf->link_speeds & ~RTE_ETH_LINK_SPEED_FIXED) {
+ case RTE_ETH_LINK_SPEED_10M_HD:
+ speed = RTE_ETH_SPEED_NUM_10M;
+ duplex = RTE_ETH_LINK_HALF_DUPLEX;
break;
- case ETH_LINK_SPEED_10M:
- speed = ETH_SPEED_NUM_10M;
- duplex = ETH_LINK_FULL_DUPLEX;
+ case RTE_ETH_LINK_SPEED_10M:
+ speed = RTE_ETH_SPEED_NUM_10M;
+ duplex = RTE_ETH_LINK_FULL_DUPLEX;
break;
- case ETH_LINK_SPEED_100M_HD:
- speed = ETH_SPEED_NUM_100M;
- duplex = ETH_LINK_HALF_DUPLEX;
+ case RTE_ETH_LINK_SPEED_100M_HD:
+ speed = RTE_ETH_SPEED_NUM_100M;
+ duplex = RTE_ETH_LINK_HALF_DUPLEX;
break;
- case ETH_LINK_SPEED_100M:
- speed = ETH_SPEED_NUM_100M;
- duplex = ETH_LINK_FULL_DUPLEX;
+ case RTE_ETH_LINK_SPEED_100M:
+ speed = RTE_ETH_SPEED_NUM_100M;
+ duplex = RTE_ETH_LINK_FULL_DUPLEX;
break;
- case ETH_LINK_SPEED_1G:
- speed = ETH_SPEED_NUM_1G;
- duplex = ETH_LINK_FULL_DUPLEX;
+ case RTE_ETH_LINK_SPEED_1G:
+ speed = RTE_ETH_SPEED_NUM_1G;
+ duplex = RTE_ETH_LINK_FULL_DUPLEX;
break;
- case ETH_LINK_SPEED_2_5G:
- speed = ETH_SPEED_NUM_2_5G;
- duplex = ETH_LINK_FULL_DUPLEX;
+ case RTE_ETH_LINK_SPEED_2_5G:
+ speed = RTE_ETH_SPEED_NUM_2_5G;
+ duplex = RTE_ETH_LINK_FULL_DUPLEX;
break;
- case ETH_LINK_SPEED_10G:
- speed = ETH_SPEED_NUM_10G;
- duplex = ETH_LINK_FULL_DUPLEX;
+ case RTE_ETH_LINK_SPEED_10G:
+ speed = RTE_ETH_SPEED_NUM_10G;
+ duplex = RTE_ETH_LINK_FULL_DUPLEX;
break;
default:
- speed = ETH_SPEED_NUM_NONE;
- duplex = ETH_LINK_FULL_DUPLEX;
+ speed = RTE_ETH_SPEED_NUM_NONE;
+ duplex = RTE_ETH_LINK_FULL_DUPLEX;
break;
}
/* Set link speed */
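The values this switch decodes come straight from the application's rte_eth_conf: a fixed-speed request sets RTE_ETH_LINK_SPEED_FIXED alongside exactly one speed bit. A minimal sketch from the application side (the helper name is hypothetical):

#include <rte_ethdev.h>

/* Sketch: request a fixed 100M full-duplex link instead of autoneg;
 * the switch above decodes exactly this combination of bits.
 */
static void
request_fixed_100m(struct rte_eth_conf *conf)
{
	conf->link_speeds = RTE_ETH_LINK_SPEED_FIXED |
			    RTE_ETH_LINK_SPEED_100M;
}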
@@ -535,30 +535,30 @@ static int dpaa_eth_dev_info(struct rte_eth_dev *dev,
dev_info->max_mac_addrs = DPAA_MAX_MAC_FILTER;
dev_info->max_hash_mac_addrs = 0;
dev_info->max_vfs = 0;
- dev_info->max_vmdq_pools = ETH_16_POOLS;
+ dev_info->max_vmdq_pools = RTE_ETH_16_POOLS;
dev_info->flow_type_rss_offloads = DPAA_RSS_OFFLOAD_ALL;
if (fif->mac_type == fman_mac_1g) {
- dev_info->speed_capa = ETH_LINK_SPEED_10M_HD
- | ETH_LINK_SPEED_10M
- | ETH_LINK_SPEED_100M_HD
- | ETH_LINK_SPEED_100M
- | ETH_LINK_SPEED_1G;
+ dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD
+ | RTE_ETH_LINK_SPEED_10M
+ | RTE_ETH_LINK_SPEED_100M_HD
+ | RTE_ETH_LINK_SPEED_100M
+ | RTE_ETH_LINK_SPEED_1G;
} else if (fif->mac_type == fman_mac_2_5g) {
- dev_info->speed_capa = ETH_LINK_SPEED_10M_HD
- | ETH_LINK_SPEED_10M
- | ETH_LINK_SPEED_100M_HD
- | ETH_LINK_SPEED_100M
- | ETH_LINK_SPEED_1G
- | ETH_LINK_SPEED_2_5G;
+ dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD
+ | RTE_ETH_LINK_SPEED_10M
+ | RTE_ETH_LINK_SPEED_100M_HD
+ | RTE_ETH_LINK_SPEED_100M
+ | RTE_ETH_LINK_SPEED_1G
+ | RTE_ETH_LINK_SPEED_2_5G;
} else if (fif->mac_type == fman_mac_10g) {
- dev_info->speed_capa = ETH_LINK_SPEED_10M_HD
- | ETH_LINK_SPEED_10M
- | ETH_LINK_SPEED_100M_HD
- | ETH_LINK_SPEED_100M
- | ETH_LINK_SPEED_1G
- | ETH_LINK_SPEED_2_5G
- | ETH_LINK_SPEED_10G;
+ dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD
+ | RTE_ETH_LINK_SPEED_10M
+ | RTE_ETH_LINK_SPEED_100M_HD
+ | RTE_ETH_LINK_SPEED_100M
+ | RTE_ETH_LINK_SPEED_1G
+ | RTE_ETH_LINK_SPEED_2_5G
+ | RTE_ETH_LINK_SPEED_10G;
} else {
DPAA_PMD_ERR("invalid link_speed: %s, %d",
dpaa_intf->name, fif->mac_type);
@@ -591,12 +591,12 @@ dpaa_dev_rx_burst_mode_get(struct rte_eth_dev *dev,
uint64_t flags;
const char *output;
} rx_offload_map[] = {
- {DEV_RX_OFFLOAD_SCATTER, " Scattered,"},
- {DEV_RX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
- {DEV_RX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
- {DEV_RX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
- {DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
- {DEV_RX_OFFLOAD_RSS_HASH, " RSS,"}
+ {RTE_ETH_RX_OFFLOAD_SCATTER, " Scattered,"},
+ {RTE_ETH_RX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
+ {RTE_ETH_RX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
+ {RTE_ETH_RX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
+ {RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
+ {RTE_ETH_RX_OFFLOAD_RSS_HASH, " RSS,"}
};
/* Update Rx offload info */
@@ -623,14 +623,14 @@ dpaa_dev_tx_burst_mode_get(struct rte_eth_dev *dev,
uint64_t flags;
const char *output;
} tx_offload_map[] = {
- {DEV_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
- {DEV_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
- {DEV_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
- {DEV_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
- {DEV_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
- {DEV_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
- {DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
- {DEV_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
+ {RTE_ETH_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
+ {RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
+ {RTE_ETH_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
+ {RTE_ETH_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
+ {RTE_ETH_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
+ {RTE_ETH_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
+ {RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
+ {RTE_ETH_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
};
/* Update Tx offload info */
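Both offload_map tables are consumed the same way: walk the array and append the label of every bit present in the queue's offload mask. Roughly, under the assumption that the table is visible at file scope (the helper name is hypothetical):

#include <rte_common.h>      /* RTE_DIM */
#include <rte_string_fns.h>  /* strlcat */

/* Sketch of the table walk that turns an RTE_ETH_TX_OFFLOAD_* mask into
 * the burst-mode string; tx_offload_map is the array defined above.
 */
static void
format_tx_offloads(uint64_t offloads, char *buf, size_t len)
{
	unsigned int i;

	buf[0] = '\0';
	for (i = 0; i < RTE_DIM(tx_offload_map); i++)
		if (offloads & tx_offload_map[i].flags)
			strlcat(buf, tx_offload_map[i].output, len);
}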
@@ -664,7 +664,7 @@ static int dpaa_eth_link_update(struct rte_eth_dev *dev,
ret = dpaa_get_link_status(__fif->node_name, link);
if (ret)
return ret;
- if (link->link_status == ETH_LINK_DOWN &&
+ if (link->link_status == RTE_ETH_LINK_DOWN &&
wait_to_complete)
rte_delay_ms(CHECK_INTERVAL);
else
@@ -675,15 +675,15 @@ static int dpaa_eth_link_update(struct rte_eth_dev *dev,
}
if (ioctl_version < 2) {
- link->link_duplex = ETH_LINK_FULL_DUPLEX;
- link->link_autoneg = ETH_LINK_AUTONEG;
+ link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+ link->link_autoneg = RTE_ETH_LINK_AUTONEG;
if (fif->mac_type == fman_mac_1g)
- link->link_speed = ETH_SPEED_NUM_1G;
+ link->link_speed = RTE_ETH_SPEED_NUM_1G;
else if (fif->mac_type == fman_mac_2_5g)
- link->link_speed = ETH_SPEED_NUM_2_5G;
+ link->link_speed = RTE_ETH_SPEED_NUM_2_5G;
else if (fif->mac_type == fman_mac_10g)
- link->link_speed = ETH_SPEED_NUM_10G;
+ link->link_speed = RTE_ETH_SPEED_NUM_10G;
else
DPAA_PMD_ERR("invalid link_speed: %s, %d",
dpaa_intf->name, fif->mac_type);
@@ -962,7 +962,7 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
if (max_rx_pktlen <= buffsz) {
;
} else if (dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_SCATTER) {
+ RTE_ETH_RX_OFFLOAD_SCATTER) {
if (max_rx_pktlen > buffsz * DPAA_SGT_MAX_ENTRIES) {
DPAA_PMD_ERR("Maximum Rx packet size %d too big to fit "
"MaxSGlist %d",
@@ -1268,7 +1268,7 @@ static int dpaa_link_down(struct rte_eth_dev *dev)
__fif = container_of(fif, struct __fman_if, __if);
if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
- dpaa_update_link_status(__fif->node_name, ETH_LINK_DOWN);
+ dpaa_update_link_status(__fif->node_name, RTE_ETH_LINK_DOWN);
else
return dpaa_eth_dev_stop(dev);
return 0;
@@ -1284,7 +1284,7 @@ static int dpaa_link_up(struct rte_eth_dev *dev)
__fif = container_of(fif, struct __fman_if, __if);
if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
- dpaa_update_link_status(__fif->node_name, ETH_LINK_UP);
+ dpaa_update_link_status(__fif->node_name, RTE_ETH_LINK_UP);
else
dpaa_eth_dev_start(dev);
return 0;
@@ -1314,10 +1314,10 @@ dpaa_flow_ctrl_set(struct rte_eth_dev *dev,
return -EINVAL;
}
- if (fc_conf->mode == RTE_FC_NONE) {
+ if (fc_conf->mode == RTE_ETH_FC_NONE) {
return 0;
- } else if (fc_conf->mode == RTE_FC_TX_PAUSE ||
- fc_conf->mode == RTE_FC_FULL) {
+ } else if (fc_conf->mode == RTE_ETH_FC_TX_PAUSE ||
+ fc_conf->mode == RTE_ETH_FC_FULL) {
fman_if_set_fc_threshold(dev->process_private,
fc_conf->high_water,
fc_conf->low_water,
@@ -1361,11 +1361,11 @@ dpaa_flow_ctrl_get(struct rte_eth_dev *dev,
}
ret = fman_if_get_fc_threshold(dev->process_private);
if (ret) {
- fc_conf->mode = RTE_FC_TX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
fc_conf->pause_time =
fman_if_get_fc_quanta(dev->process_private);
} else {
- fc_conf->mode = RTE_FC_NONE;
+ fc_conf->mode = RTE_ETH_FC_NONE;
}
return 0;
@@ -1626,10 +1626,10 @@ static int dpaa_fc_set_default(struct dpaa_if *dpaa_intf,
fc_conf = dpaa_intf->fc_conf;
ret = fman_if_get_fc_threshold(fman_intf);
if (ret) {
- fc_conf->mode = RTE_FC_TX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
fc_conf->pause_time = fman_if_get_fc_quanta(fman_intf);
} else {
- fc_conf->mode = RTE_FC_NONE;
+ fc_conf->mode = RTE_ETH_FC_NONE;
}
return 0;
@@ -74,11 +74,11 @@
#define DPAA_DEBUG_FQ_TX_ERROR 1
#define DPAA_RSS_OFFLOAD_ALL ( \
- ETH_RSS_L2_PAYLOAD | \
- ETH_RSS_IP | \
- ETH_RSS_UDP | \
- ETH_RSS_TCP | \
- ETH_RSS_SCTP)
+ RTE_ETH_RSS_L2_PAYLOAD | \
+ RTE_ETH_RSS_IP | \
+ RTE_ETH_RSS_UDP | \
+ RTE_ETH_RSS_TCP | \
+ RTE_ETH_RSS_SCTP)
#define DPAA_TX_CKSUM_OFFLOAD_MASK ( \
PKT_TX_IP_CKSUM | \
@@ -394,7 +394,7 @@ static void set_dist_units(ioc_fm_pcd_net_env_params_t *dist_units,
if (req_dist_set % 2 != 0) {
dist_field = 1U << loop;
switch (dist_field) {
- case ETH_RSS_L2_PAYLOAD:
+ case RTE_ETH_RSS_L2_PAYLOAD:
if (l2_configured)
break;
@@ -404,9 +404,9 @@ static void set_dist_units(ioc_fm_pcd_net_env_params_t *dist_units,
= HEADER_TYPE_ETH;
break;
- case ETH_RSS_IPV4:
- case ETH_RSS_FRAG_IPV4:
- case ETH_RSS_NONFRAG_IPV4_OTHER:
+ case RTE_ETH_RSS_IPV4:
+ case RTE_ETH_RSS_FRAG_IPV4:
+ case RTE_ETH_RSS_NONFRAG_IPV4_OTHER:
if (ipv4_configured)
break;
@@ -415,10 +415,10 @@ static void set_dist_units(ioc_fm_pcd_net_env_params_t *dist_units,
= HEADER_TYPE_IPV4;
break;
- case ETH_RSS_IPV6:
- case ETH_RSS_FRAG_IPV6:
- case ETH_RSS_NONFRAG_IPV6_OTHER:
- case ETH_RSS_IPV6_EX:
+ case RTE_ETH_RSS_IPV6:
+ case RTE_ETH_RSS_FRAG_IPV6:
+ case RTE_ETH_RSS_NONFRAG_IPV6_OTHER:
+ case RTE_ETH_RSS_IPV6_EX:
if (ipv6_configured)
break;
@@ -427,9 +427,9 @@ static void set_dist_units(ioc_fm_pcd_net_env_params_t *dist_units,
= HEADER_TYPE_IPV6;
break;
- case ETH_RSS_NONFRAG_IPV4_TCP:
- case ETH_RSS_NONFRAG_IPV6_TCP:
- case ETH_RSS_IPV6_TCP_EX:
+ case RTE_ETH_RSS_NONFRAG_IPV4_TCP:
+ case RTE_ETH_RSS_NONFRAG_IPV6_TCP:
+ case RTE_ETH_RSS_IPV6_TCP_EX:
if (tcp_configured)
break;
@@ -438,9 +438,9 @@ static void set_dist_units(ioc_fm_pcd_net_env_params_t *dist_units,
= HEADER_TYPE_TCP;
break;
- case ETH_RSS_NONFRAG_IPV4_UDP:
- case ETH_RSS_NONFRAG_IPV6_UDP:
- case ETH_RSS_IPV6_UDP_EX:
+ case RTE_ETH_RSS_NONFRAG_IPV4_UDP:
+ case RTE_ETH_RSS_NONFRAG_IPV6_UDP:
+ case RTE_ETH_RSS_IPV6_UDP_EX:
if (udp_configured)
break;
@@ -449,8 +449,8 @@ static void set_dist_units(ioc_fm_pcd_net_env_params_t *dist_units,
= HEADER_TYPE_UDP;
break;
- case ETH_RSS_NONFRAG_IPV4_SCTP:
- case ETH_RSS_NONFRAG_IPV6_SCTP:
+ case RTE_ETH_RSS_NONFRAG_IPV4_SCTP:
+ case RTE_ETH_RSS_NONFRAG_IPV6_SCTP:
if (sctp_configured)
break;
@@ -220,9 +220,8 @@ dpaa2_distset_to_dpkg_profile_cfg(
if (req_dist_set % 2 != 0) {
dist_field = 1ULL << loop;
switch (dist_field) {
- case ETH_RSS_L2_PAYLOAD:
- case ETH_RSS_ETH:
-
+ case RTE_ETH_RSS_L2_PAYLOAD:
+ case RTE_ETH_RSS_ETH:
if (l2_configured)
break;
l2_configured = 1;
@@ -238,7 +237,7 @@ dpaa2_distset_to_dpkg_profile_cfg(
i++;
break;
- case ETH_RSS_PPPOE:
+ case RTE_ETH_RSS_PPPOE:
if (pppoe_configured)
break;
kg_cfg->extracts[i].extract.from_hdr.prot =
@@ -252,7 +251,7 @@ dpaa2_distset_to_dpkg_profile_cfg(
i++;
break;
- case ETH_RSS_ESP:
+ case RTE_ETH_RSS_ESP:
if (esp_configured)
break;
esp_configured = 1;
@@ -268,7 +267,7 @@ dpaa2_distset_to_dpkg_profile_cfg(
i++;
break;
- case ETH_RSS_AH:
+ case RTE_ETH_RSS_AH:
if (ah_configured)
break;
ah_configured = 1;
@@ -284,8 +283,8 @@ dpaa2_distset_to_dpkg_profile_cfg(
i++;
break;
- case ETH_RSS_C_VLAN:
- case ETH_RSS_S_VLAN:
+ case RTE_ETH_RSS_C_VLAN:
+ case RTE_ETH_RSS_S_VLAN:
if (vlan_configured)
break;
vlan_configured = 1;
@@ -301,7 +300,7 @@ dpaa2_distset_to_dpkg_profile_cfg(
i++;
break;
- case ETH_RSS_MPLS:
+ case RTE_ETH_RSS_MPLS:
if (mpls_configured)
break;
@@ -338,13 +337,13 @@ dpaa2_distset_to_dpkg_profile_cfg(
i++;
break;
- case ETH_RSS_IPV4:
- case ETH_RSS_FRAG_IPV4:
- case ETH_RSS_NONFRAG_IPV4_OTHER:
- case ETH_RSS_IPV6:
- case ETH_RSS_FRAG_IPV6:
- case ETH_RSS_NONFRAG_IPV6_OTHER:
- case ETH_RSS_IPV6_EX:
+ case RTE_ETH_RSS_IPV4:
+ case RTE_ETH_RSS_FRAG_IPV4:
+ case RTE_ETH_RSS_NONFRAG_IPV4_OTHER:
+ case RTE_ETH_RSS_IPV6:
+ case RTE_ETH_RSS_FRAG_IPV6:
+ case RTE_ETH_RSS_NONFRAG_IPV6_OTHER:
+ case RTE_ETH_RSS_IPV6_EX:
if (l3_configured)
break;
@@ -382,12 +381,12 @@ dpaa2_distset_to_dpkg_profile_cfg(
i++;
break;
- case ETH_RSS_NONFRAG_IPV4_TCP:
- case ETH_RSS_NONFRAG_IPV6_TCP:
- case ETH_RSS_NONFRAG_IPV4_UDP:
- case ETH_RSS_NONFRAG_IPV6_UDP:
- case ETH_RSS_IPV6_TCP_EX:
- case ETH_RSS_IPV6_UDP_EX:
+ case RTE_ETH_RSS_NONFRAG_IPV4_TCP:
+ case RTE_ETH_RSS_NONFRAG_IPV6_TCP:
+ case RTE_ETH_RSS_NONFRAG_IPV4_UDP:
+ case RTE_ETH_RSS_NONFRAG_IPV6_UDP:
+ case RTE_ETH_RSS_IPV6_TCP_EX:
+ case RTE_ETH_RSS_IPV6_UDP_EX:
if (l4_configured)
break;
@@ -414,8 +413,8 @@ dpaa2_distset_to_dpkg_profile_cfg(
i++;
break;
- case ETH_RSS_NONFRAG_IPV4_SCTP:
- case ETH_RSS_NONFRAG_IPV6_SCTP:
+ case RTE_ETH_RSS_NONFRAG_IPV4_SCTP:
+ case RTE_ETH_RSS_NONFRAG_IPV6_SCTP:
if (sctp_configured)
break;
@@ -38,33 +38,33 @@
/* Supported Rx offloads */
static uint64_t dev_rx_offloads_sup =
- DEV_RX_OFFLOAD_CHECKSUM |
- DEV_RX_OFFLOAD_SCTP_CKSUM |
- DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
- DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_VLAN_FILTER |
- DEV_RX_OFFLOAD_TIMESTAMP;
+ RTE_ETH_RX_OFFLOAD_CHECKSUM |
+ RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+ RTE_ETH_RX_OFFLOAD_TIMESTAMP;
/* Rx offloads which cannot be disabled */
static uint64_t dev_rx_offloads_nodis =
- DEV_RX_OFFLOAD_RSS_HASH |
- DEV_RX_OFFLOAD_SCATTER;
+ RTE_ETH_RX_OFFLOAD_RSS_HASH |
+ RTE_ETH_RX_OFFLOAD_SCATTER;
/* Supported Tx offloads */
static uint64_t dev_tx_offloads_sup =
- DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_SCTP_CKSUM |
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_TX_OFFLOAD_MT_LOCKFREE |
- DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_MT_LOCKFREE |
+ RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
/* Tx offloads which cannot be disabled */
static uint64_t dev_tx_offloads_nodis =
- DEV_TX_OFFLOAD_MULTI_SEGS;
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
/* enable timestamp in mbuf */
bool dpaa2_enable_ts[RTE_MAX_ETHPORTS];
@@ -142,7 +142,7 @@ dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
PMD_INIT_FUNC_TRACE();
- if (mask & ETH_VLAN_FILTER_MASK) {
+ if (mask & RTE_ETH_VLAN_FILTER_MASK) {
/* VLAN Filter not available */
if (!priv->max_vlan_filters) {
DPAA2_PMD_INFO("VLAN filter not available");
@@ -150,7 +150,7 @@ dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
}
if (dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_VLAN_FILTER)
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
priv->token, true);
else
@@ -251,13 +251,13 @@ dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_rx_offloads_nodis;
dev_info->tx_offload_capa = dev_tx_offloads_sup |
dev_tx_offloads_nodis;
- dev_info->speed_capa = ETH_LINK_SPEED_1G |
- ETH_LINK_SPEED_2_5G |
- ETH_LINK_SPEED_10G;
+ dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G |
+ RTE_ETH_LINK_SPEED_2_5G |
+ RTE_ETH_LINK_SPEED_10G;
dev_info->max_hash_mac_addrs = 0;
dev_info->max_vfs = 0;
- dev_info->max_vmdq_pools = ETH_16_POOLS;
+ dev_info->max_vmdq_pools = RTE_ETH_16_POOLS;
dev_info->flow_type_rss_offloads = DPAA2_RSS_OFFLOAD_ALL;
dev_info->default_rxportconf.burst_size = dpaa2_dqrr_size;
@@ -270,10 +270,10 @@ dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->default_rxportconf.ring_size = DPAA2_RX_DEFAULT_NBDESC;
if (dpaa2_svr_family == SVR_LX2160A) {
- dev_info->speed_capa |= ETH_LINK_SPEED_25G |
- ETH_LINK_SPEED_40G |
- ETH_LINK_SPEED_50G |
- ETH_LINK_SPEED_100G;
+ dev_info->speed_capa |= RTE_ETH_LINK_SPEED_25G |
+ RTE_ETH_LINK_SPEED_40G |
+ RTE_ETH_LINK_SPEED_50G |
+ RTE_ETH_LINK_SPEED_100G;
}
return 0;
@@ -291,15 +291,15 @@ dpaa2_dev_rx_burst_mode_get(struct rte_eth_dev *dev,
uint64_t flags;
const char *output;
} rx_offload_map[] = {
- {DEV_RX_OFFLOAD_CHECKSUM, " Checksum,"},
- {DEV_RX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
- {DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
- {DEV_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP csum,"},
- {DEV_RX_OFFLOAD_VLAN_STRIP, " VLAN strip,"},
- {DEV_RX_OFFLOAD_VLAN_FILTER, " VLAN filter,"},
- {DEV_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
- {DEV_RX_OFFLOAD_RSS_HASH, " RSS,"},
- {DEV_RX_OFFLOAD_SCATTER, " Scattered,"}
+ {RTE_ETH_RX_OFFLOAD_CHECKSUM, " Checksum,"},
+ {RTE_ETH_RX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
+ {RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
+ {RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP csum,"},
+ {RTE_ETH_RX_OFFLOAD_VLAN_STRIP, " VLAN strip,"},
+ {RTE_ETH_RX_OFFLOAD_VLAN_FILTER, " VLAN filter,"},
+ {RTE_ETH_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
+ {RTE_ETH_RX_OFFLOAD_RSS_HASH, " RSS,"},
+ {RTE_ETH_RX_OFFLOAD_SCATTER, " Scattered,"}
};
/* Update Rx offload info */
@@ -326,15 +326,15 @@ dpaa2_dev_tx_burst_mode_get(struct rte_eth_dev *dev,
uint64_t flags;
const char *output;
} tx_offload_map[] = {
- {DEV_TX_OFFLOAD_VLAN_INSERT, " VLAN Insert,"},
- {DEV_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
- {DEV_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
- {DEV_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
- {DEV_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
- {DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
- {DEV_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
- {DEV_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
- {DEV_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
+ {RTE_ETH_TX_OFFLOAD_VLAN_INSERT, " VLAN Insert,"},
+ {RTE_ETH_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
+ {RTE_ETH_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
+ {RTE_ETH_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
+ {RTE_ETH_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
+ {RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
+ {RTE_ETH_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
+ {RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
+ {RTE_ETH_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
};
/* Update Tx offload info */
@@ -573,7 +573,7 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
return -1;
}
- if (eth_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) {
+ if (eth_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
for (tc_index = 0; tc_index < priv->num_rx_tc; tc_index++) {
ret = dpaa2_setup_flow_dist(dev,
eth_conf->rx_adv_conf.rss_conf.rss_hf,
@@ -587,12 +587,12 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
}
}
- if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
+ if (rx_offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM)
rx_l3_csum_offload = true;
- if ((rx_offloads & DEV_RX_OFFLOAD_UDP_CKSUM) ||
- (rx_offloads & DEV_RX_OFFLOAD_TCP_CKSUM) ||
- (rx_offloads & DEV_RX_OFFLOAD_SCTP_CKSUM))
+ if ((rx_offloads & RTE_ETH_RX_OFFLOAD_UDP_CKSUM) ||
+ (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_CKSUM) ||
+ (rx_offloads & RTE_ETH_RX_OFFLOAD_SCTP_CKSUM))
rx_l4_csum_offload = true;
ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
@@ -610,7 +610,7 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
}
#if !defined(RTE_LIBRTE_IEEE1588)
- if (rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)
+ if (rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
#endif
{
ret = rte_mbuf_dyn_rx_timestamp_register(
@@ -623,12 +623,12 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
dpaa2_enable_ts[dev->data->port_id] = true;
}
- if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
+ if (tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
tx_l3_csum_offload = true;
- if ((tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) ||
- (tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) ||
- (tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM))
+ if ((tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) ||
+ (tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) ||
+ (tx_offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM))
tx_l4_csum_offload = true;
ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
@@ -660,8 +660,8 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
}
}
- if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
- dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);
+ if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
+ dpaa2_vlan_offload_set(dev, RTE_ETH_VLAN_FILTER_MASK);
dpaa2_tm_init(dev);
@@ -1856,7 +1856,7 @@ dpaa2_dev_link_update(struct rte_eth_dev *dev,
DPAA2_PMD_DEBUG("error: dpni_get_link_state %d", ret);
return -1;
}
- if (state.up == ETH_LINK_DOWN &&
+ if (state.up == RTE_ETH_LINK_DOWN &&
wait_to_complete)
rte_delay_ms(CHECK_INTERVAL);
else
@@ -1868,9 +1868,9 @@ dpaa2_dev_link_update(struct rte_eth_dev *dev,
link.link_speed = state.rate;
if (state.options & DPNI_LINK_OPT_HALF_DUPLEX)
- link.link_duplex = ETH_LINK_HALF_DUPLEX;
+ link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
else
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
ret = rte_eth_linkstatus_set(dev, &link);
if (ret == -1)
@@ -2031,9 +2031,9 @@ dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
* No TX side flow control (send Pause frame disabled)
*/
if (!(state.options & DPNI_LINK_OPT_ASYM_PAUSE))
- fc_conf->mode = RTE_FC_FULL;
+ fc_conf->mode = RTE_ETH_FC_FULL;
else
- fc_conf->mode = RTE_FC_RX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
} else {
/* DPNI_LINK_OPT_PAUSE not set
* if ASYM_PAUSE set,
@@ -2043,9 +2043,9 @@ dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
* Flow control disabled
*/
if (state.options & DPNI_LINK_OPT_ASYM_PAUSE)
- fc_conf->mode = RTE_FC_TX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
else
- fc_conf->mode = RTE_FC_NONE;
+ fc_conf->mode = RTE_ETH_FC_NONE;
}
return ret;
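The two DPNI option bits encode all four ethdev flow-control modes; the branches above reduce to this truth table (a sketch, with a hypothetical helper name):

/* Sketch: (PAUSE, ASYM_PAUSE) -> rte_eth_fc_mode, matching the logic above.
 *   PAUSE=1 ASYM=0 -> RTE_ETH_FC_FULL
 *   PAUSE=1 ASYM=1 -> RTE_ETH_FC_RX_PAUSE
 *   PAUSE=0 ASYM=1 -> RTE_ETH_FC_TX_PAUSE
 *   PAUSE=0 ASYM=0 -> RTE_ETH_FC_NONE
 */
static enum rte_eth_fc_mode
dpni_opts_to_fc_mode(int pause, int asym)
{
	if (pause)
		return asym ? RTE_ETH_FC_RX_PAUSE : RTE_ETH_FC_FULL;
	return asym ? RTE_ETH_FC_TX_PAUSE : RTE_ETH_FC_NONE;
}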
@@ -2089,14 +2089,14 @@ dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
/* update cfg with fc_conf */
switch (fc_conf->mode) {
- case RTE_FC_FULL:
+ case RTE_ETH_FC_FULL:
/* Full flow control;
* OPT_PAUSE set, ASYM_PAUSE not set
*/
cfg.options |= DPNI_LINK_OPT_PAUSE;
cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
break;
- case RTE_FC_TX_PAUSE:
+ case RTE_ETH_FC_TX_PAUSE:
/* Enable RX flow control
* OPT_PAUSE not set;
* ASYM_PAUSE set;
@@ -2104,7 +2104,7 @@ dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
cfg.options &= ~DPNI_LINK_OPT_PAUSE;
break;
- case RTE_FC_RX_PAUSE:
+ case RTE_ETH_FC_RX_PAUSE:
/* Enable TX Flow control
* OPT_PAUSE set
* ASYM_PAUSE set
@@ -2112,7 +2112,7 @@ dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
cfg.options |= DPNI_LINK_OPT_PAUSE;
cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
break;
- case RTE_FC_NONE:
+ case RTE_ETH_FC_NONE:
/* Disable Flow control
* OPT_PAUSE not set
* ASYM_PAUSE not set
@@ -65,17 +65,17 @@
#define DPAA2_TX_CONF_ENABLE 0x08
#define DPAA2_RSS_OFFLOAD_ALL ( \
- ETH_RSS_L2_PAYLOAD | \
- ETH_RSS_IP | \
- ETH_RSS_UDP | \
- ETH_RSS_TCP | \
- ETH_RSS_SCTP | \
- ETH_RSS_MPLS | \
- ETH_RSS_C_VLAN | \
- ETH_RSS_S_VLAN | \
- ETH_RSS_ESP | \
- ETH_RSS_AH | \
- ETH_RSS_PPPOE)
+ RTE_ETH_RSS_L2_PAYLOAD | \
+ RTE_ETH_RSS_IP | \
+ RTE_ETH_RSS_UDP | \
+ RTE_ETH_RSS_TCP | \
+ RTE_ETH_RSS_SCTP | \
+ RTE_ETH_RSS_MPLS | \
+ RTE_ETH_RSS_C_VLAN | \
+ RTE_ETH_RSS_S_VLAN | \
+ RTE_ETH_RSS_ESP | \
+ RTE_ETH_RSS_AH | \
+ RTE_ETH_RSS_PPPOE)
/* LX2 FRC Parsed values (Little Endian) */
#define DPAA2_PKT_TYPE_ETHER 0x0060
@@ -773,7 +773,7 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
#endif
if (eth_data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_VLAN_STRIP)
+ RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
rte_vlan_strip(bufs[num_rx]);
dq_storage++;
@@ -987,7 +987,7 @@ dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
eth_data->port_id);
if (eth_data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_VLAN_STRIP) {
+ RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
rte_vlan_strip(bufs[num_rx]);
}
@@ -1230,7 +1230,7 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
if (unlikely(((*bufs)->ol_flags
& PKT_TX_VLAN_PKT) ||
(eth_data->dev_conf.txmode.offloads
- & DEV_TX_OFFLOAD_VLAN_INSERT))) {
+ & RTE_ETH_TX_OFFLOAD_VLAN_INSERT))) {
ret = rte_vlan_insert(bufs);
if (ret)
goto send_n_return;
@@ -1273,7 +1273,7 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
if (unlikely(((*bufs)->ol_flags & PKT_TX_VLAN_PKT) ||
(eth_data->dev_conf.txmode.offloads
- & DEV_TX_OFFLOAD_VLAN_INSERT))) {
+ & RTE_ETH_TX_OFFLOAD_VLAN_INSERT))) {
int ret = rte_vlan_insert(bufs);
if (ret)
goto send_n_return;
@@ -82,15 +82,15 @@
#define E1000_FTQF_QUEUE_ENABLE 0x00000100
#define IGB_RSS_OFFLOAD_ALL ( \
- ETH_RSS_IPV4 | \
- ETH_RSS_NONFRAG_IPV4_TCP | \
- ETH_RSS_NONFRAG_IPV4_UDP | \
- ETH_RSS_IPV6 | \
- ETH_RSS_NONFRAG_IPV6_TCP | \
- ETH_RSS_NONFRAG_IPV6_UDP | \
- ETH_RSS_IPV6_EX | \
- ETH_RSS_IPV6_TCP_EX | \
- ETH_RSS_IPV6_UDP_EX)
+ RTE_ETH_RSS_IPV4 | \
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+ RTE_ETH_RSS_IPV6 | \
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+ RTE_ETH_RSS_IPV6_EX | \
+ RTE_ETH_RSS_IPV6_TCP_EX | \
+ RTE_ETH_RSS_IPV6_UDP_EX)
/*
* The overhead from MTU to max frame size.
@@ -597,8 +597,8 @@ eth_em_start(struct rte_eth_dev *dev)
e1000_clear_hw_cntrs_base_generic(hw);
- mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
- ETH_VLAN_EXTEND_MASK;
+ mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+ RTE_ETH_VLAN_EXTEND_MASK;
ret = eth_em_vlan_offload_set(dev, mask);
if (ret) {
PMD_INIT_LOG(ERR, "Unable to update vlan offload");
@@ -611,39 +611,39 @@ eth_em_start(struct rte_eth_dev *dev)
/* Setup link speed and duplex */
speeds = &dev->data->dev_conf.link_speeds;
- if (*speeds == ETH_LINK_SPEED_AUTONEG) {
+ if (*speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
hw->mac.autoneg = 1;
} else {
num_speeds = 0;
- autoneg = (*speeds & ETH_LINK_SPEED_FIXED) == 0;
+ autoneg = (*speeds & RTE_ETH_LINK_SPEED_FIXED) == 0;
/* Reset */
hw->phy.autoneg_advertised = 0;
- if (*speeds & ~(ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
- ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
- ETH_LINK_SPEED_1G | ETH_LINK_SPEED_FIXED)) {
+ if (*speeds & ~(RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M |
+ RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M |
+ RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_FIXED)) {
num_speeds = -1;
goto error_invalid_config;
}
- if (*speeds & ETH_LINK_SPEED_10M_HD) {
+ if (*speeds & RTE_ETH_LINK_SPEED_10M_HD) {
hw->phy.autoneg_advertised |= ADVERTISE_10_HALF;
num_speeds++;
}
- if (*speeds & ETH_LINK_SPEED_10M) {
+ if (*speeds & RTE_ETH_LINK_SPEED_10M) {
hw->phy.autoneg_advertised |= ADVERTISE_10_FULL;
num_speeds++;
}
- if (*speeds & ETH_LINK_SPEED_100M_HD) {
+ if (*speeds & RTE_ETH_LINK_SPEED_100M_HD) {
hw->phy.autoneg_advertised |= ADVERTISE_100_HALF;
num_speeds++;
}
- if (*speeds & ETH_LINK_SPEED_100M) {
+ if (*speeds & RTE_ETH_LINK_SPEED_100M) {
hw->phy.autoneg_advertised |= ADVERTISE_100_FULL;
num_speeds++;
}
- if (*speeds & ETH_LINK_SPEED_1G) {
+ if (*speeds & RTE_ETH_LINK_SPEED_1G) {
hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL;
num_speeds++;
}
@@ -1102,9 +1102,9 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
.nb_mtu_seg_max = EM_TX_MAX_MTU_SEG,
};
- dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
- ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
- ETH_LINK_SPEED_1G;
+ dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M |
+ RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M |
+ RTE_ETH_LINK_SPEED_1G;
/* Preferred queue parameters */
dev_info->default_rxportconf.nb_queues = 1;
@@ -1162,17 +1162,17 @@ eth_em_link_update(struct rte_eth_dev *dev, int wait_to_complete)
uint16_t duplex, speed;
hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
link.link_duplex = (duplex == FULL_DUPLEX) ?
- ETH_LINK_FULL_DUPLEX :
- ETH_LINK_HALF_DUPLEX;
+ RTE_ETH_LINK_FULL_DUPLEX :
+ RTE_ETH_LINK_HALF_DUPLEX;
link.link_speed = speed;
- link.link_status = ETH_LINK_UP;
+ link.link_status = RTE_ETH_LINK_UP;
link.link_autoneg = !(dev->data->dev_conf.link_speeds &
- ETH_LINK_SPEED_FIXED);
+ RTE_ETH_LINK_SPEED_FIXED);
} else {
- link.link_speed = ETH_SPEED_NUM_NONE;
- link.link_duplex = ETH_LINK_HALF_DUPLEX;
- link.link_status = ETH_LINK_DOWN;
- link.link_autoneg = ETH_LINK_FIXED;
+ link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+ link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+ link.link_status = RTE_ETH_LINK_DOWN;
+ link.link_autoneg = RTE_ETH_LINK_FIXED;
}
return rte_eth_linkstatus_set(dev, &link);
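On the application side, the rte_eth_link filled in here is read back through rte_eth_link_get_nowait() and tested against the renamed constants. A minimal sketch:

#include <stdio.h>
#include <rte_ethdev.h>

/* Sketch: poll a port's link and report it with the RTE_ETH_LINK_* names. */
static void
report_link(uint16_t port_id)
{
	struct rte_eth_link link;

	if (rte_eth_link_get_nowait(port_id, &link) != 0)
		return;
	printf("port %u: %s, %u Mbps, %s\n", port_id,
	       link.link_status == RTE_ETH_LINK_UP ? "up" : "down",
	       link.link_speed,
	       link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
			"full-duplex" : "half-duplex");
}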
@@ -1424,15 +1424,15 @@ eth_em_vlan_offload_set(struct rte_eth_dev *dev, int mask)
struct rte_eth_rxmode *rxmode;
rxmode = &dev->data->dev_conf.rxmode;
- if(mask & ETH_VLAN_STRIP_MASK){
- if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
em_vlan_hw_strip_enable(dev);
else
em_vlan_hw_strip_disable(dev);
}
- if(mask & ETH_VLAN_FILTER_MASK){
- if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+ if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
em_vlan_hw_filter_enable(dev);
else
em_vlan_hw_filter_disable(dev);
@@ -1601,7 +1601,7 @@ eth_em_interrupt_action(struct rte_eth_dev *dev,
if (link.link_status) {
PMD_INIT_LOG(INFO, " Port %d: Link Up - speed %u Mbps - %s",
dev->data->port_id, link.link_speed,
- link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+ link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
"full-duplex" : "half-duplex");
} else {
PMD_INIT_LOG(INFO, " Port %d: Link Down", dev->data->port_id);
@@ -1683,13 +1683,13 @@ eth_em_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
rx_pause = 0;
if (rx_pause && tx_pause)
- fc_conf->mode = RTE_FC_FULL;
+ fc_conf->mode = RTE_ETH_FC_FULL;
else if (rx_pause)
- fc_conf->mode = RTE_FC_RX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
else if (tx_pause)
- fc_conf->mode = RTE_FC_TX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
else
- fc_conf->mode = RTE_FC_NONE;
+ fc_conf->mode = RTE_ETH_FC_NONE;
return 0;
}
@@ -93,7 +93,7 @@ struct em_rx_queue {
struct em_rx_entry *sw_ring; /**< address of RX software ring. */
struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
- uint64_t offloads; /**< Offloads of DEV_RX_OFFLOAD_* */
+ uint64_t offloads; /**< Offloads of RTE_ETH_RX_OFFLOAD_* */
uint16_t nb_rx_desc; /**< number of RX descriptors. */
uint16_t rx_tail; /**< current value of RDT register. */
uint16_t nb_rx_hold; /**< number of held free RX desc. */
@@ -173,7 +173,7 @@ struct em_tx_queue {
uint8_t wthresh; /**< Write-back threshold register. */
struct em_ctx_info ctx_cache;
/**< Hardware context history.*/
- uint64_t offloads; /**< offloads of DEV_TX_OFFLOAD_* */
+ uint64_t offloads; /**< offloads of RTE_ETH_TX_OFFLOAD_* */
const struct rte_memzone *mz;
};
@@ -1171,11 +1171,11 @@ em_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
RTE_SET_USED(dev);
tx_offload_capa =
- DEV_TX_OFFLOAD_MULTI_SEGS |
- DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM;
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+ RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
return tx_offload_capa;
}
@@ -1369,13 +1369,13 @@ em_get_rx_port_offloads_capa(void)
uint64_t rx_offload_capa;
rx_offload_capa =
- DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_VLAN_FILTER |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_KEEP_CRC |
- DEV_RX_OFFLOAD_SCATTER;
+ RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+ RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_KEEP_CRC |
+ RTE_ETH_RX_OFFLOAD_SCATTER;
return rx_offload_capa;
}
@@ -1469,7 +1469,7 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
rxq->rx_free_thresh = rx_conf->rx_free_thresh;
rxq->queue_id = queue_idx;
rxq->port_id = dev->data->port_id;
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+ if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
rxq->crc_len = RTE_ETHER_CRC_LEN;
else
rxq->crc_len = 0;
@@ -1788,7 +1788,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
* Reset crc_len in case it was changed after queue setup by a
* call to configure
*/
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+ if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
rxq->crc_len = RTE_ETHER_CRC_LEN;
else
rxq->crc_len = 0;
@@ -1831,7 +1831,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
}
}
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
+ if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
if (!dev->data->scattered_rx)
PMD_INIT_LOG(DEBUG, "forcing scatter mode");
dev->rx_pkt_burst = eth_em_recv_scattered_pkts;
@@ -1844,7 +1844,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
*/
rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
- if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
rxcsum |= E1000_RXCSUM_IPOFL;
else
rxcsum &= ~E1000_RXCSUM_IPOFL;
@@ -1870,7 +1870,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
}
/* Setup the Receive Control Register. */
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+ if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
else
rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
@@ -1073,21 +1073,21 @@ igb_check_mq_mode(struct rte_eth_dev *dev)
uint16_t nb_rx_q = dev->data->nb_rx_queues;
uint16_t nb_tx_q = dev->data->nb_tx_queues;
- if ((rx_mq_mode & ETH_MQ_RX_DCB_FLAG) ||
- tx_mq_mode == ETH_MQ_TX_DCB ||
- tx_mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+ if ((rx_mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) ||
+ tx_mq_mode == RTE_ETH_MQ_TX_DCB ||
+ tx_mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB) {
PMD_INIT_LOG(ERR, "DCB mode is not supported.");
return -EINVAL;
}
if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
/* Check multi-queue mode.
- * To no break software we accept ETH_MQ_RX_NONE as this might
+ * To not break software we accept RTE_ETH_MQ_RX_NONE as this might
* be used to turn off VLAN filter.
*/
- if (rx_mq_mode == ETH_MQ_RX_NONE ||
- rx_mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
- dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
+ if (rx_mq_mode == RTE_ETH_MQ_RX_NONE ||
+ rx_mq_mode == RTE_ETH_MQ_RX_VMDQ_ONLY) {
+ dev->data->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_ONLY;
RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
} else {
/* Only support one queue on VFs.
@@ -1099,12 +1099,12 @@ igb_check_mq_mode(struct rte_eth_dev *dev)
return -EINVAL;
}
/* TX mode is not used here, so mode might be ignored.*/
- if (tx_mq_mode != ETH_MQ_TX_VMDQ_ONLY) {
+ if (tx_mq_mode != RTE_ETH_MQ_TX_VMDQ_ONLY) {
/* SRIOV only works in VMDq enable mode */
PMD_INIT_LOG(WARNING, "SRIOV is active,"
" TX mode %d is not supported. "
" Driver will behave as %d mode.",
- tx_mq_mode, ETH_MQ_TX_VMDQ_ONLY);
+ tx_mq_mode, RTE_ETH_MQ_TX_VMDQ_ONLY);
}
/* check valid queue number */
@@ -1117,17 +1117,17 @@ igb_check_mq_mode(struct rte_eth_dev *dev)
* To not break software that sets an invalid mode, only display
* a warning if an invalid mode is used.
*/
- if (rx_mq_mode != ETH_MQ_RX_NONE &&
- rx_mq_mode != ETH_MQ_RX_VMDQ_ONLY &&
- rx_mq_mode != ETH_MQ_RX_RSS) {
+ if (rx_mq_mode != RTE_ETH_MQ_RX_NONE &&
+ rx_mq_mode != RTE_ETH_MQ_RX_VMDQ_ONLY &&
+ rx_mq_mode != RTE_ETH_MQ_RX_RSS) {
/* RSS together with VMDq not supported*/
PMD_INIT_LOG(ERR, "RX mode %d is not supported.",
rx_mq_mode);
return -EINVAL;
}
- if (tx_mq_mode != ETH_MQ_TX_NONE &&
- tx_mq_mode != ETH_MQ_TX_VMDQ_ONLY) {
+ if (tx_mq_mode != RTE_ETH_MQ_TX_NONE &&
+ tx_mq_mode != RTE_ETH_MQ_TX_VMDQ_ONLY) {
PMD_INIT_LOG(WARNING, "TX mode %d is not supported."
" Due to txmode is meaningless in this"
" driver, just ignore.",
@@ -1146,8 +1146,8 @@ eth_igb_configure(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
- if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
- dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+ if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+ dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
/* multiple queue mode checking */
ret = igb_check_mq_mode(dev);
@@ -1287,8 +1287,8 @@ eth_igb_start(struct rte_eth_dev *dev)
/*
* VLAN Offload Settings
*/
- mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
- ETH_VLAN_EXTEND_MASK;
+ mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+ RTE_ETH_VLAN_EXTEND_MASK;
ret = eth_igb_vlan_offload_set(dev, mask);
if (ret) {
PMD_INIT_LOG(ERR, "Unable to set vlan offload");
@@ -1296,7 +1296,7 @@ eth_igb_start(struct rte_eth_dev *dev)
return ret;
}
- if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
+ if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_ONLY) {
/* Enable VLAN filter since VMDq always use VLAN filter */
igb_vmdq_vlan_hw_filter_enable(dev);
}
@@ -1310,39 +1310,39 @@ eth_igb_start(struct rte_eth_dev *dev)
/* Setup link speed and duplex */
speeds = &dev->data->dev_conf.link_speeds;
- if (*speeds == ETH_LINK_SPEED_AUTONEG) {
+ if (*speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
hw->mac.autoneg = 1;
} else {
num_speeds = 0;
- autoneg = (*speeds & ETH_LINK_SPEED_FIXED) == 0;
+ autoneg = (*speeds & RTE_ETH_LINK_SPEED_FIXED) == 0;
/* Reset */
hw->phy.autoneg_advertised = 0;
- if (*speeds & ~(ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
- ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
- ETH_LINK_SPEED_1G | ETH_LINK_SPEED_FIXED)) {
+ if (*speeds & ~(RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M |
+ RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M |
+ RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_FIXED)) {
num_speeds = -1;
goto error_invalid_config;
}
- if (*speeds & ETH_LINK_SPEED_10M_HD) {
+ if (*speeds & RTE_ETH_LINK_SPEED_10M_HD) {
hw->phy.autoneg_advertised |= ADVERTISE_10_HALF;
num_speeds++;
}
- if (*speeds & ETH_LINK_SPEED_10M) {
+ if (*speeds & RTE_ETH_LINK_SPEED_10M) {
hw->phy.autoneg_advertised |= ADVERTISE_10_FULL;
num_speeds++;
}
- if (*speeds & ETH_LINK_SPEED_100M_HD) {
+ if (*speeds & RTE_ETH_LINK_SPEED_100M_HD) {
hw->phy.autoneg_advertised |= ADVERTISE_100_HALF;
num_speeds++;
}
- if (*speeds & ETH_LINK_SPEED_100M) {
+ if (*speeds & RTE_ETH_LINK_SPEED_100M) {
hw->phy.autoneg_advertised |= ADVERTISE_100_FULL;
num_speeds++;
}
- if (*speeds & ETH_LINK_SPEED_1G) {
+ if (*speeds & RTE_ETH_LINK_SPEED_1G) {
hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL;
num_speeds++;
}
@@ -2185,21 +2185,21 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
case e1000_82576:
dev_info->max_rx_queues = 16;
dev_info->max_tx_queues = 16;
- dev_info->max_vmdq_pools = ETH_8_POOLS;
+ dev_info->max_vmdq_pools = RTE_ETH_8_POOLS;
dev_info->vmdq_queue_num = 16;
break;
case e1000_82580:
dev_info->max_rx_queues = 8;
dev_info->max_tx_queues = 8;
- dev_info->max_vmdq_pools = ETH_8_POOLS;
+ dev_info->max_vmdq_pools = RTE_ETH_8_POOLS;
dev_info->vmdq_queue_num = 8;
break;
case e1000_i350:
dev_info->max_rx_queues = 8;
dev_info->max_tx_queues = 8;
- dev_info->max_vmdq_pools = ETH_8_POOLS;
+ dev_info->max_vmdq_pools = RTE_ETH_8_POOLS;
dev_info->vmdq_queue_num = 8;
break;
@@ -2225,7 +2225,7 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
return -EINVAL;
}
dev_info->hash_key_size = IGB_HKEY_MAX_INDEX * sizeof(uint32_t);
- dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
+ dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
dev_info->flow_type_rss_offloads = IGB_RSS_OFFLOAD_ALL;
dev_info->default_rxconf = (struct rte_eth_rxconf) {
@@ -2251,9 +2251,9 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->rx_desc_lim = rx_desc_lim;
dev_info->tx_desc_lim = tx_desc_lim;
- dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
- ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
- ETH_LINK_SPEED_1G;
+ dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M |
+ RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M |
+ RTE_ETH_LINK_SPEED_1G;
dev_info->max_mtu = dev_info->max_rx_pktlen - E1000_ETH_OVERHEAD;
dev_info->min_mtu = RTE_ETHER_MIN_MTU;
@@ -2296,12 +2296,12 @@ eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. */
dev_info->max_mac_addrs = hw->mac.rar_entry_count;
- dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_SCTP_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO;
+ dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_TSO;
switch (hw->mac.type) {
case e1000_vfadapt:
dev_info->max_rx_queues = 2;
@@ -2402,17 +2402,17 @@ eth_igb_link_update(struct rte_eth_dev *dev, int wait_to_complete)
uint16_t duplex, speed;
hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
link.link_duplex = (duplex == FULL_DUPLEX) ?
- ETH_LINK_FULL_DUPLEX :
- ETH_LINK_HALF_DUPLEX;
+ RTE_ETH_LINK_FULL_DUPLEX :
+ RTE_ETH_LINK_HALF_DUPLEX;
link.link_speed = speed;
- link.link_status = ETH_LINK_UP;
+ link.link_status = RTE_ETH_LINK_UP;
link.link_autoneg = !(dev->data->dev_conf.link_speeds &
- ETH_LINK_SPEED_FIXED);
+ RTE_ETH_LINK_SPEED_FIXED);
} else if (!link_check) {
link.link_speed = 0;
- link.link_duplex = ETH_LINK_HALF_DUPLEX;
- link.link_status = ETH_LINK_DOWN;
- link.link_autoneg = ETH_LINK_FIXED;
+ link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+ link.link_status = RTE_ETH_LINK_DOWN;
+ link.link_autoneg = RTE_ETH_LINK_FIXED;
}
return rte_eth_linkstatus_set(dev, &link);
@@ -2588,7 +2588,7 @@ eth_igb_vlan_tpid_set(struct rte_eth_dev *dev,
qinq &= E1000_CTRL_EXT_EXT_VLAN;
/* only outer TPID of double VLAN can be configured*/
- if (qinq && vlan_type == ETH_VLAN_TYPE_OUTER) {
+ if (qinq && vlan_type == RTE_ETH_VLAN_TYPE_OUTER) {
reg = E1000_READ_REG(hw, E1000_VET);
reg = (reg & (~E1000_VET_VET_EXT)) |
((uint32_t)tpid << E1000_VET_VET_EXT_SHIFT);
@@ -2703,22 +2703,22 @@ eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask)
struct rte_eth_rxmode *rxmode;
rxmode = &dev->data->dev_conf.rxmode;
- if(mask & ETH_VLAN_STRIP_MASK){
- if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
igb_vlan_hw_strip_enable(dev);
else
igb_vlan_hw_strip_disable(dev);
}
- if(mask & ETH_VLAN_FILTER_MASK){
- if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+ if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
igb_vlan_hw_filter_enable(dev);
else
igb_vlan_hw_filter_disable(dev);
}
- if(mask & ETH_VLAN_EXTEND_MASK){
- if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+ if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
igb_vlan_hw_extend_enable(dev);
else
igb_vlan_hw_extend_disable(dev);
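Note the mask parameter only says which VLAN features changed; the desired on/off state is read from rxmode.offloads. From the API side, toggling stripping looks roughly like this (the helper name is hypothetical):

#include <rte_ethdev.h>

/* Sketch: turn VLAN stripping on from the application; ethdev computes
 * which bits changed and calls the PMD handler with
 * RTE_ETH_VLAN_STRIP_MASK set in 'mask'.
 */
static int
enable_vlan_strip(uint16_t port_id)
{
	int cur = rte_eth_dev_get_vlan_offload(port_id);

	if (cur < 0)
		return cur;
	return rte_eth_dev_set_vlan_offload(port_id,
			cur | RTE_ETH_VLAN_STRIP_OFFLOAD);
}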
@@ -2870,7 +2870,7 @@ eth_igb_interrupt_action(struct rte_eth_dev *dev,
" Port %d: Link Up - speed %u Mbps - %s",
dev->data->port_id,
(unsigned)link.link_speed,
- link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+ link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
"full-duplex" : "half-duplex");
} else {
PMD_INIT_LOG(INFO, " Port %d: Link Down",
@@ -3024,13 +3024,13 @@ eth_igb_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
rx_pause = 0;
if (rx_pause && tx_pause)
- fc_conf->mode = RTE_FC_FULL;
+ fc_conf->mode = RTE_ETH_FC_FULL;
else if (rx_pause)
- fc_conf->mode = RTE_FC_RX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
else if (tx_pause)
- fc_conf->mode = RTE_FC_TX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
else
- fc_conf->mode = RTE_FC_NONE;
+ fc_conf->mode = RTE_ETH_FC_NONE;
return 0;
}
@@ -3099,18 +3099,18 @@ eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
* on configuration
*/
switch (fc_conf->mode) {
- case RTE_FC_NONE:
+ case RTE_ETH_FC_NONE:
ctrl &= ~E1000_CTRL_RFCE & ~E1000_CTRL_TFCE;
break;
- case RTE_FC_RX_PAUSE:
+ case RTE_ETH_FC_RX_PAUSE:
ctrl |= E1000_CTRL_RFCE;
ctrl &= ~E1000_CTRL_TFCE;
break;
- case RTE_FC_TX_PAUSE:
+ case RTE_ETH_FC_TX_PAUSE:
ctrl |= E1000_CTRL_TFCE;
ctrl &= ~E1000_CTRL_RFCE;
break;
- case RTE_FC_FULL:
+ case RTE_ETH_FC_FULL:
ctrl |= E1000_CTRL_RFCE | E1000_CTRL_TFCE;
break;
default:
@@ -3258,22 +3258,22 @@ igbvf_dev_configure(struct rte_eth_dev *dev)
PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
dev->data->port_id);
- if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
- dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+ if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+ dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
/*
* VF has no ability to enable/disable HW CRC
* Keep the persistent behavior the same as Host PF
*/
#ifndef RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC
- if (conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+ if (conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
- conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_KEEP_CRC;
+ conf->rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_KEEP_CRC;
}
#else
- if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)) {
+ if (!(conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)) {
PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
- conf->rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
+ conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
}
#endif
@@ -3571,16 +3571,16 @@ eth_igb_rss_reta_update(struct rte_eth_dev *dev,
uint16_t idx, shift;
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (reta_size != ETH_RSS_RETA_SIZE_128) {
+ if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
"(%d) doesn't match the number hardware can supported "
- "(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
+ "(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
return -EINVAL;
}
for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) {
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
mask = (uint8_t)((reta_conf[idx].mask >> shift) &
IGB_4_BIT_MASK);
if (!mask)
@@ -3612,16 +3612,16 @@ eth_igb_rss_reta_query(struct rte_eth_dev *dev,
uint16_t idx, shift;
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (reta_size != ETH_RSS_RETA_SIZE_128) {
+ if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
"(%d) doesn't match the number hardware can supported "
- "(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
+ "(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
return -EINVAL;
}
for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) {
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
mask = (uint8_t)((reta_conf[idx].mask >> shift) &
IGB_4_BIT_MASK);
if (!mask)
@@ -88,7 +88,7 @@ void igb_pf_host_init(struct rte_eth_dev *eth_dev)
if (*vfinfo == NULL)
rte_panic("Cannot allocate memory for private VF data\n");
- RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_8_POOLS;
+ RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_8_POOLS;
RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = nb_queue;
RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = vf_num;
RTE_ETH_DEV_SRIOV(eth_dev).def_pool_q_idx = (uint16_t)(vf_num * nb_queue);
@@ -111,7 +111,7 @@ struct igb_rx_queue {
uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */
uint32_t flags; /**< RX flags. */
- uint64_t offloads; /**< offloads of DEV_RX_OFFLOAD_* */
+ uint64_t offloads; /**< offloads of RTE_ETH_RX_OFFLOAD_* */
const struct rte_memzone *mz;
};
@@ -186,7 +186,7 @@ struct igb_tx_queue {
/**< Start context position for transmit queue. */
struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
/**< Hardware context history.*/
- uint64_t offloads; /**< offloads of DEV_TX_OFFLOAD_* */
+ uint64_t offloads; /**< offloads of RTE_ETH_TX_OFFLOAD_* */
const struct rte_memzone *mz;
};
@@ -1459,13 +1459,13 @@ igb_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
uint64_t tx_offload_capa;
RTE_SET_USED(dev);
- tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_SCTP_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO |
- DEV_TX_OFFLOAD_MULTI_SEGS;
+ tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_TSO |
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
return tx_offload_capa;
}
@@ -1640,19 +1640,19 @@ igb_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_VLAN_FILTER |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_KEEP_CRC |
- DEV_RX_OFFLOAD_SCATTER |
- DEV_RX_OFFLOAD_RSS_HASH;
+ rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+ RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_KEEP_CRC |
+ RTE_ETH_RX_OFFLOAD_SCATTER |
+ RTE_ETH_RX_OFFLOAD_RSS_HASH;
if (hw->mac.type == e1000_i350 ||
hw->mac.type == e1000_i210 ||
hw->mac.type == e1000_i211)
- rx_offload_capa |= DEV_RX_OFFLOAD_VLAN_EXTEND;
+ rx_offload_capa |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
return rx_offload_capa;
}
@@ -1733,7 +1733,7 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
rxq->port_id = dev->data->port_id;
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+ if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
rxq->crc_len = RTE_ETHER_CRC_LEN;
else
rxq->crc_len = 0;
@@ -1950,23 +1950,23 @@ igb_hw_rss_hash_set(struct e1000_hw *hw, struct rte_eth_rss_conf *rss_conf)
/* Set configured hashing protocols in MRQC register */
rss_hf = rss_conf->rss_hf;
mrqc = E1000_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
- if (rss_hf & ETH_RSS_IPV4)
+ if (rss_hf & RTE_ETH_RSS_IPV4)
mrqc |= E1000_MRQC_RSS_FIELD_IPV4;
- if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
mrqc |= E1000_MRQC_RSS_FIELD_IPV4_TCP;
- if (rss_hf & ETH_RSS_IPV6)
+ if (rss_hf & RTE_ETH_RSS_IPV6)
mrqc |= E1000_MRQC_RSS_FIELD_IPV6;
- if (rss_hf & ETH_RSS_IPV6_EX)
+ if (rss_hf & RTE_ETH_RSS_IPV6_EX)
mrqc |= E1000_MRQC_RSS_FIELD_IPV6_EX;
- if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP;
- if (rss_hf & ETH_RSS_IPV6_TCP_EX)
+ if (rss_hf & RTE_ETH_RSS_IPV6_TCP_EX)
mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
- if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
- if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
- if (rss_hf & ETH_RSS_IPV6_UDP_EX)
+ if (rss_hf & RTE_ETH_RSS_IPV6_UDP_EX)
mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP_EX;
E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
}
@@ -2032,23 +2032,23 @@ int eth_igb_rss_hash_conf_get(struct rte_eth_dev *dev,
}
rss_hf = 0;
if (mrqc & E1000_MRQC_RSS_FIELD_IPV4)
- rss_hf |= ETH_RSS_IPV4;
+ rss_hf |= RTE_ETH_RSS_IPV4;
if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
- rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+ rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
if (mrqc & E1000_MRQC_RSS_FIELD_IPV6)
- rss_hf |= ETH_RSS_IPV6;
+ rss_hf |= RTE_ETH_RSS_IPV6;
if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_EX)
- rss_hf |= ETH_RSS_IPV6_EX;
+ rss_hf |= RTE_ETH_RSS_IPV6_EX;
if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
- rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+ rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP_EX)
- rss_hf |= ETH_RSS_IPV6_TCP_EX;
+ rss_hf |= RTE_ETH_RSS_IPV6_TCP_EX;
if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_UDP)
- rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+ rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP)
- rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+ rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP_EX)
- rss_hf |= ETH_RSS_IPV6_UDP_EX;
+ rss_hf |= RTE_ETH_RSS_IPV6_UDP_EX;
rss_conf->rss_hf = rss_hf;
return 0;
}
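Applications read these MRQC-derived bits back through rte_eth_dev_rss_hash_conf_get() and test individual RTE_ETH_RSS_* flags. A minimal sketch:

#include <rte_ethdev.h>

/* Sketch: check whether a port currently hashes IPv4/TCP traffic. */
static int
hashes_ipv4_tcp(uint16_t port_id)
{
	struct rte_eth_rss_conf conf = { .rss_key = NULL };

	if (rte_eth_dev_rss_hash_conf_get(port_id, &conf) != 0)
		return 0;
	return (conf.rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) != 0;
}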
@@ -2170,15 +2170,15 @@ igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
E1000_VMOLR_ROPE | E1000_VMOLR_BAM |
E1000_VMOLR_MPME);
- if (cfg->rx_mode & ETH_VMDQ_ACCEPT_UNTAG)
+ if (cfg->rx_mode & RTE_ETH_VMDQ_ACCEPT_UNTAG)
vmolr |= E1000_VMOLR_AUPE;
- if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_MC)
+ if (cfg->rx_mode & RTE_ETH_VMDQ_ACCEPT_HASH_MC)
vmolr |= E1000_VMOLR_ROMPE;
- if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_UC)
+ if (cfg->rx_mode & RTE_ETH_VMDQ_ACCEPT_HASH_UC)
vmolr |= E1000_VMOLR_ROPE;
- if (cfg->rx_mode & ETH_VMDQ_ACCEPT_BROADCAST)
+ if (cfg->rx_mode & RTE_ETH_VMDQ_ACCEPT_BROADCAST)
vmolr |= E1000_VMOLR_BAM;
- if (cfg->rx_mode & ETH_VMDQ_ACCEPT_MULTICAST)
+ if (cfg->rx_mode & RTE_ETH_VMDQ_ACCEPT_MULTICAST)
vmolr |= E1000_VMOLR_MPME;
E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
@@ -2214,9 +2214,9 @@ igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
/* VLVF: set up filters for vlan tags as configured */
for (i = 0; i < cfg->nb_pool_maps; i++) {
/* set vlan id in VF register and set the valid bit */
- E1000_WRITE_REG(hw, E1000_VLVF(i), (E1000_VLVF_VLANID_ENABLE | \
- (cfg->pool_map[i].vlan_id & ETH_VLAN_ID_MAX) | \
- ((cfg->pool_map[i].pools << E1000_VLVF_POOLSEL_SHIFT ) & \
+ E1000_WRITE_REG(hw, E1000_VLVF(i), (E1000_VLVF_VLANID_ENABLE |
+ (cfg->pool_map[i].vlan_id & RTE_ETH_VLAN_ID_MAX) |
+ ((cfg->pool_map[i].pools << E1000_VLVF_POOLSEL_SHIFT) &
E1000_VLVF_POOLSEL_MASK)));
}
@@ -2268,7 +2268,7 @@ igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t mrqc;
- if (RTE_ETH_DEV_SRIOV(dev).active == ETH_8_POOLS) {
+ if (RTE_ETH_DEV_SRIOV(dev).active == RTE_ETH_8_POOLS) {
/*
* SRIOV active scheme
* FIXME if support RSS together with VMDq & SRIOV
@@ -2282,14 +2282,14 @@ igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
* SRIOV inactive scheme
*/
switch (dev->data->dev_conf.rxmode.mq_mode) {
- case ETH_MQ_RX_RSS:
+ case RTE_ETH_MQ_RX_RSS:
igb_rss_configure(dev);
break;
- case ETH_MQ_RX_VMDQ_ONLY:
+ case RTE_ETH_MQ_RX_VMDQ_ONLY:
/*Configure general VMDQ only RX parameters*/
igb_vmdq_rx_hw_configure(dev);
break;
- case ETH_MQ_RX_NONE:
+ case RTE_ETH_MQ_RX_NONE:
/* if mq_mode is none, disable rss mode. */
default:
igb_rss_disable(dev);
@@ -2338,7 +2338,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
* Set maximum packet length by default, and might be updated
* together with enabling/disabling dual VLAN.
*/
- if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
max_len += VLAN_TAG_SIZE;
E1000_WRITE_REG(hw, E1000_RLPML, max_len);
@@ -2374,7 +2374,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
* Reset crc_len in case it was changed after queue setup by a
* call to configure
*/
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+ if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
rxq->crc_len = RTE_ETHER_CRC_LEN;
else
rxq->crc_len = 0;
@@ -2444,7 +2444,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl);
}
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
+ if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
if (!dev->data->scattered_rx)
PMD_INIT_LOG(DEBUG, "forcing scatter mode");
dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
@@ -2488,16 +2488,16 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
rxcsum |= E1000_RXCSUM_PCSD;
/* Enable both L3/L4 rx checksum offload */
- if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM)
rxcsum |= E1000_RXCSUM_IPOFL;
else
rxcsum &= ~E1000_RXCSUM_IPOFL;
if (rxmode->offloads &
- (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM))
+ (RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM))
rxcsum |= E1000_RXCSUM_TUOFL;
else
rxcsum &= ~E1000_RXCSUM_TUOFL;
- if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
rxcsum |= E1000_RXCSUM_CRCOFL;
else
rxcsum &= ~E1000_RXCSUM_CRCOFL;
@@ -2505,7 +2505,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
/* Setup the Receive Control Register. */
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+ if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
/* clear STRCRC bit in all queues */
@@ -2545,7 +2545,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
(hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
/* Make sure VLAN Filters are off. */
- if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_VMDQ_ONLY)
+ if (dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_VMDQ_ONLY)
rctl &= ~E1000_RCTL_VFE;
/* Don't store bad packets. */
rctl &= ~E1000_RCTL_SBP;
@@ -2743,7 +2743,7 @@ eth_igbvf_rx_init(struct rte_eth_dev *dev)
E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
}
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
+ if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
if (!dev->data->scattered_rx)
PMD_INIT_LOG(DEBUG, "forcing scatter mode");
dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
@@ -117,10 +117,10 @@ static const struct ena_stats ena_stats_rx_strings[] = {
#define ENA_STATS_ARRAY_TX ARRAY_SIZE(ena_stats_tx_strings)
#define ENA_STATS_ARRAY_RX ARRAY_SIZE(ena_stats_rx_strings)
-#define QUEUE_OFFLOADS (DEV_TX_OFFLOAD_TCP_CKSUM |\
- DEV_TX_OFFLOAD_UDP_CKSUM |\
- DEV_TX_OFFLOAD_IPV4_CKSUM |\
- DEV_TX_OFFLOAD_TCP_TSO)
+#define QUEUE_OFFLOADS (RTE_ETH_TX_OFFLOAD_TCP_CKSUM |\
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |\
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |\
+ RTE_ETH_TX_OFFLOAD_TCP_TSO)
#define MBUF_OFFLOADS (PKT_TX_L4_MASK |\
PKT_TX_IP_CKSUM |\
PKT_TX_TCP_SEG)
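
QUEUE_OFFLOADS lists the per-queue RTE_ETH_TX_OFFLOAD_* capability bits, MBUF_OFFLOADS the per-packet PKT_TX_* requests; a packet may only ask for what its queue advertised. A minimal sketch of the sender side for an untagged IPv4/TCP frame:

mbuf->l2_len = sizeof(struct rte_ether_hdr);
mbuf->l3_len = sizeof(struct rte_ipv4_hdr);
mbuf->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
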
@@ -332,7 +332,7 @@ static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
(queue_offloads & QUEUE_OFFLOADS)) {
/* check if TSO is required */
if ((mbuf->ol_flags & PKT_TX_TCP_SEG) &&
- (queue_offloads & DEV_TX_OFFLOAD_TCP_TSO)) {
+ (queue_offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO)) {
ena_tx_ctx->tso_enable = true;
ena_meta->l4_hdr_len = GET_L4_HDR_LEN(mbuf);
@@ -340,7 +340,7 @@ static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
/* check if L3 checksum is needed */
if ((mbuf->ol_flags & PKT_TX_IP_CKSUM) &&
- (queue_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM))
+ (queue_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM))
ena_tx_ctx->l3_csum_enable = true;
if (mbuf->ol_flags & PKT_TX_IPV6) {
@@ -357,12 +357,12 @@ static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
/* check if L4 checksum is needed */
if (((mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM) &&
- (queue_offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) {
+ (queue_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)) {
ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
ena_tx_ctx->l4_csum_enable = true;
} else if (((mbuf->ol_flags & PKT_TX_L4_MASK) ==
PKT_TX_UDP_CKSUM) &&
- (queue_offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) {
+ (queue_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM)) {
ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
ena_tx_ctx->l4_csum_enable = true;
} else {
@@ -643,9 +643,9 @@ static int ena_link_update(struct rte_eth_dev *dev,
struct rte_eth_link *link = &dev->data->dev_link;
struct ena_adapter *adapter = dev->data->dev_private;
- link->link_status = adapter->link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
- link->link_speed = ETH_SPEED_NUM_NONE;
- link->link_duplex = ETH_LINK_FULL_DUPLEX;
+ link->link_status = adapter->link_status ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
+ link->link_speed = RTE_ETH_SPEED_NUM_NONE;
+ link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
return 0;
}
@@ -923,7 +923,7 @@ static int ena_start(struct rte_eth_dev *dev)
if (rc)
goto err_start_tx;
- if (adapter->edev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+ if (adapter->edev_data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
rc = ena_rss_configure(adapter);
if (rc)
goto err_rss_init;
@@ -2004,9 +2004,9 @@ static int ena_dev_configure(struct rte_eth_dev *dev)
adapter->state = ENA_ADAPTER_STATE_CONFIG;
- if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
- dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
- dev->data->dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+ if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+ dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
+ dev->data->dev_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
/* Scattered Rx cannot be turned off in the HW, so this capability must
* be forced.
@@ -2067,17 +2067,17 @@ static uint64_t ena_get_rx_port_offloads(struct ena_adapter *adapter)
uint64_t port_offloads = 0;
if (adapter->offloads.rx_offloads & ENA_L3_IPV4_CSUM)
- port_offloads |= DEV_RX_OFFLOAD_IPV4_CKSUM;
+ port_offloads |= RTE_ETH_RX_OFFLOAD_IPV4_CKSUM;
if (adapter->offloads.rx_offloads &
(ENA_L4_IPV4_CSUM | ENA_L4_IPV6_CSUM))
port_offloads |=
- DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM;
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_TCP_CKSUM;
if (adapter->offloads.rx_offloads & ENA_RX_RSS_HASH)
- port_offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+ port_offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
- port_offloads |= DEV_RX_OFFLOAD_SCATTER;
+ port_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
return port_offloads;
}
@@ -2087,17 +2087,17 @@ static uint64_t ena_get_tx_port_offloads(struct ena_adapter *adapter)
uint64_t port_offloads = 0;
if (adapter->offloads.tx_offloads & ENA_IPV4_TSO)
- port_offloads |= DEV_TX_OFFLOAD_TCP_TSO;
+ port_offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
if (adapter->offloads.tx_offloads & ENA_L3_IPV4_CSUM)
- port_offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM;
+ port_offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
if (adapter->offloads.tx_offloads &
(ENA_L4_IPV4_CSUM_PARTIAL | ENA_L4_IPV4_CSUM |
ENA_L4_IPV6_CSUM | ENA_L4_IPV6_CSUM_PARTIAL))
port_offloads |=
- DEV_TX_OFFLOAD_UDP_CKSUM | DEV_TX_OFFLOAD_TCP_CKSUM;
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM | RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
- port_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+ port_offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
return port_offloads;
}
@@ -2130,14 +2130,14 @@ static int ena_infos_get(struct rte_eth_dev *dev,
ena_assert_msg(ena_dev != NULL, "Uninitialized device\n");
dev_info->speed_capa =
- ETH_LINK_SPEED_1G |
- ETH_LINK_SPEED_2_5G |
- ETH_LINK_SPEED_5G |
- ETH_LINK_SPEED_10G |
- ETH_LINK_SPEED_25G |
- ETH_LINK_SPEED_40G |
- ETH_LINK_SPEED_50G |
- ETH_LINK_SPEED_100G;
+ RTE_ETH_LINK_SPEED_1G |
+ RTE_ETH_LINK_SPEED_2_5G |
+ RTE_ETH_LINK_SPEED_5G |
+ RTE_ETH_LINK_SPEED_10G |
+ RTE_ETH_LINK_SPEED_25G |
+ RTE_ETH_LINK_SPEED_40G |
+ RTE_ETH_LINK_SPEED_50G |
+ RTE_ETH_LINK_SPEED_100G;
/* Inform framework about available features */
dev_info->rx_offload_capa = ena_get_rx_port_offloads(adapter);
@@ -2303,7 +2303,7 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
}
#endif
- fill_hash = rx_ring->offloads & DEV_RX_OFFLOAD_RSS_HASH;
+ fill_hash = rx_ring->offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH;
descs_in_use = rx_ring->ring_size -
ena_com_free_q_entries(rx_ring->ena_com_io_sq) - 1;
@@ -2416,11 +2416,11 @@ eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
/* Check if requested offload is also enabled for the queue */
if ((ol_flags & PKT_TX_IP_CKSUM &&
- !(tx_ring->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)) ||
+ !(tx_ring->offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)) ||
(l4_csum_flag == PKT_TX_TCP_CKSUM &&
- !(tx_ring->offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) ||
+ !(tx_ring->offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)) ||
(l4_csum_flag == PKT_TX_UDP_CKSUM &&
- !(tx_ring->offloads & DEV_TX_OFFLOAD_UDP_CKSUM))) {
+ !(tx_ring->offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM))) {
PMD_TX_LOG(DEBUG,
"mbuf[%" PRIu32 "]: requested offloads: %" PRIu16 " are not enabled for the queue[%u]\n",
i, m->nb_segs, tx_ring->id);
@@ -58,8 +58,8 @@
#define ENA_HASH_KEY_SIZE 40
-#define ENA_ALL_RSS_HF (ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_UDP | \
- ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_NONFRAG_IPV6_UDP)
+#define ENA_ALL_RSS_HF (RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_NONFRAG_IPV6_UDP)
#define ENA_IO_TXQ_IDX(q) (2 * (q))
#define ENA_IO_RXQ_IDX(q) (2 * (q) + 1)
@@ -76,7 +76,7 @@ int ena_rss_reta_update(struct rte_eth_dev *dev,
if (reta_size == 0 || reta_conf == NULL)
return -EINVAL;
- if (!(dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) {
+ if (!(dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
PMD_DRV_LOG(ERR,
"RSS was not configured for the PMD\n");
return -ENOTSUP;
@@ -93,8 +93,8 @@ int ena_rss_reta_update(struct rte_eth_dev *dev,
/* Each reta_conf is for 64 entries.
* To support 128 we use 2 conf of 64.
*/
- conf_idx = i / RTE_RETA_GROUP_SIZE;
- idx = i % RTE_RETA_GROUP_SIZE;
+ conf_idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ idx = i % RTE_ETH_RETA_GROUP_SIZE;
if (TEST_BIT(reta_conf[conf_idx].mask, idx)) {
entry_value =
ENA_IO_RXQ_IDX(reta_conf[conf_idx].reta[idx]);
@@ -139,7 +139,7 @@ int ena_rss_reta_query(struct rte_eth_dev *dev,
if (reta_size == 0 || reta_conf == NULL)
return -EINVAL;
- if (!(dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) {
+ if (!(dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
PMD_DRV_LOG(ERR,
"RSS was not configured for the PMD\n");
return -ENOTSUP;
@@ -154,8 +154,8 @@ int ena_rss_reta_query(struct rte_eth_dev *dev,
}
for (i = 0 ; i < reta_size ; i++) {
- reta_conf_idx = i / RTE_RETA_GROUP_SIZE;
- reta_idx = i % RTE_RETA_GROUP_SIZE;
+ reta_conf_idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ reta_idx = i % RTE_ETH_RETA_GROUP_SIZE;
if (TEST_BIT(reta_conf[reta_conf_idx].mask, reta_idx))
reta_conf[reta_conf_idx].reta[reta_idx] =
ENA_IO_RXQ_IDX_REV(indirect_table[i]);
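
On the application side the renamed RTE_ETH_RETA_GROUP_SIZE drives the same 64-entry grouping; a minimal sketch that spreads a 128-entry table round-robin over the Rx queues (port_id and nb_rxq are assumed set up elsewhere):

struct rte_eth_rss_reta_entry64 reta_conf[2];
uint16_t i, reta_size = 128;	/* assumed device RETA size */

memset(reta_conf, 0, sizeof(reta_conf));
for (i = 0; i < reta_size; i++) {
	uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
	uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE;

	reta_conf[idx].mask |= 1ULL << shift;
	reta_conf[idx].reta[shift] = i % nb_rxq;
}
if (rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size) != 0)
	printf("RETA update failed on port %u\n", port_id);
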
@@ -199,34 +199,34 @@ static uint64_t ena_admin_hf_to_eth_hf(enum ena_admin_flow_hash_proto proto,
/* Convert proto to ETH flag */
switch (proto) {
case ENA_ADMIN_RSS_TCP4:
- rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+ rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
break;
case ENA_ADMIN_RSS_UDP4:
- rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+ rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
break;
case ENA_ADMIN_RSS_TCP6:
- rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+ rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
break;
case ENA_ADMIN_RSS_UDP6:
- rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+ rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
break;
case ENA_ADMIN_RSS_IP4:
- rss_hf |= ETH_RSS_IPV4;
+ rss_hf |= RTE_ETH_RSS_IPV4;
break;
case ENA_ADMIN_RSS_IP6:
- rss_hf |= ETH_RSS_IPV6;
+ rss_hf |= RTE_ETH_RSS_IPV6;
break;
case ENA_ADMIN_RSS_IP4_FRAG:
- rss_hf |= ETH_RSS_FRAG_IPV4;
+ rss_hf |= RTE_ETH_RSS_FRAG_IPV4;
break;
case ENA_ADMIN_RSS_NOT_IP:
- rss_hf |= ETH_RSS_L2_PAYLOAD;
+ rss_hf |= RTE_ETH_RSS_L2_PAYLOAD;
break;
case ENA_ADMIN_RSS_TCP6_EX:
- rss_hf |= ETH_RSS_IPV6_TCP_EX;
+ rss_hf |= RTE_ETH_RSS_IPV6_TCP_EX;
break;
case ENA_ADMIN_RSS_IP6_EX:
- rss_hf |= ETH_RSS_IPV6_EX;
+ rss_hf |= RTE_ETH_RSS_IPV6_EX;
break;
default:
break;
@@ -235,10 +235,10 @@ static uint64_t ena_admin_hf_to_eth_hf(enum ena_admin_flow_hash_proto proto,
/* Check if only DA or SA is being used for L3. */
switch (fields & ENA_HF_RSS_ALL_L3) {
case ENA_ADMIN_RSS_L3_SA:
- rss_hf |= ETH_RSS_L3_SRC_ONLY;
+ rss_hf |= RTE_ETH_RSS_L3_SRC_ONLY;
break;
case ENA_ADMIN_RSS_L3_DA:
- rss_hf |= ETH_RSS_L3_DST_ONLY;
+ rss_hf |= RTE_ETH_RSS_L3_DST_ONLY;
break;
default:
break;
@@ -247,10 +247,10 @@ static uint64_t ena_admin_hf_to_eth_hf(enum ena_admin_flow_hash_proto proto,
/* Check if only DA or SA is being used for L4. */
switch (fields & ENA_HF_RSS_ALL_L4) {
case ENA_ADMIN_RSS_L4_SP:
- rss_hf |= ETH_RSS_L4_SRC_ONLY;
+ rss_hf |= RTE_ETH_RSS_L4_SRC_ONLY;
break;
case ENA_ADMIN_RSS_L4_DP:
- rss_hf |= ETH_RSS_L4_DST_ONLY;
+ rss_hf |= RTE_ETH_RSS_L4_DST_ONLY;
break;
default:
break;
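
These SRC_ONLY/DST_ONLY modifiers combine with the protocol flags when a hash is requested; for instance, hashing TCP/IPv4 flows on source address and destination port only would look like the sketch below, assuming the device accepts the combination:

uint64_t rss_hf = RTE_ETH_RSS_NONFRAG_IPV4_TCP |
		  RTE_ETH_RSS_L3_SRC_ONLY |
		  RTE_ETH_RSS_L4_DST_ONLY;
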
@@ -268,11 +268,11 @@ static uint16_t ena_eth_hf_to_admin_hf(enum ena_admin_flow_hash_proto proto,
fields_mask = ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;
/* Determine which fields of L3 should be used. */
- switch (rss_hf & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY)) {
- case ETH_RSS_L3_DST_ONLY:
+ switch (rss_hf & (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY)) {
+ case RTE_ETH_RSS_L3_DST_ONLY:
fields_mask |= ENA_ADMIN_RSS_L3_DA;
break;
- case ETH_RSS_L3_SRC_ONLY:
+ case RTE_ETH_RSS_L3_SRC_ONLY:
fields_mask |= ENA_ADMIN_RSS_L3_SA;
break;
default:
@@ -284,11 +284,11 @@ static uint16_t ena_eth_hf_to_admin_hf(enum ena_admin_flow_hash_proto proto,
}
/* Determine which fields of L4 should be used. */
- switch (rss_hf & (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY)) {
- case ETH_RSS_L4_DST_ONLY:
+ switch (rss_hf & (RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY)) {
+ case RTE_ETH_RSS_L4_DST_ONLY:
fields_mask |= ENA_ADMIN_RSS_L4_DP;
break;
- case ETH_RSS_L4_SRC_ONLY:
+ case RTE_ETH_RSS_L4_SRC_ONLY:
fields_mask |= ENA_ADMIN_RSS_L4_SP;
break;
default:
@@ -334,43 +334,43 @@ static int ena_set_hash_fields(struct ena_com_dev *ena_dev, uint64_t rss_hf)
int rc, i;
/* Turn on appropriate fields for each requested packet type */
- if ((rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) != 0)
+ if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) != 0)
selected_fields[ENA_ADMIN_RSS_TCP4].fields =
ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_TCP4, rss_hf);
- if ((rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) != 0)
+ if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) != 0)
selected_fields[ENA_ADMIN_RSS_UDP4].fields =
ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_UDP4, rss_hf);
- if ((rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) != 0)
+ if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) != 0)
selected_fields[ENA_ADMIN_RSS_TCP6].fields =
ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_TCP6, rss_hf);
- if ((rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) != 0)
+ if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) != 0)
selected_fields[ENA_ADMIN_RSS_UDP6].fields =
ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_UDP6, rss_hf);
- if ((rss_hf & ETH_RSS_IPV4) != 0)
+ if ((rss_hf & RTE_ETH_RSS_IPV4) != 0)
selected_fields[ENA_ADMIN_RSS_IP4].fields =
ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_IP4, rss_hf);
- if ((rss_hf & ETH_RSS_IPV6) != 0)
+ if ((rss_hf & RTE_ETH_RSS_IPV6) != 0)
selected_fields[ENA_ADMIN_RSS_IP6].fields =
ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_IP6, rss_hf);
- if ((rss_hf & ETH_RSS_FRAG_IPV4) != 0)
+ if ((rss_hf & RTE_ETH_RSS_FRAG_IPV4) != 0)
selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_IP4_FRAG, rss_hf);
- if ((rss_hf & ETH_RSS_L2_PAYLOAD) != 0)
+ if ((rss_hf & RTE_ETH_RSS_L2_PAYLOAD) != 0)
selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_NOT_IP, rss_hf);
- if ((rss_hf & ETH_RSS_IPV6_TCP_EX) != 0)
+ if ((rss_hf & RTE_ETH_RSS_IPV6_TCP_EX) != 0)
selected_fields[ENA_ADMIN_RSS_TCP6_EX].fields =
ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_TCP6_EX, rss_hf);
- if ((rss_hf & ETH_RSS_IPV6_EX) != 0)
+ if ((rss_hf & RTE_ETH_RSS_IPV6_EX) != 0)
selected_fields[ENA_ADMIN_RSS_IP6_EX].fields =
ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_IP6_EX, rss_hf);
@@ -541,7 +541,7 @@ int ena_rss_hash_conf_get(struct rte_eth_dev *dev,
uint16_t admin_hf;
static bool warn_once;
- if (!(dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) {
+ if (!(dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
PMD_DRV_LOG(ERR, "RSS was not configured for the PMD\n");
return -ENOTSUP;
}
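
A mask like the ones converted above reaches the driver through rte_eth_rss_conf; a minimal application-side sketch that keeps the driver's default key:

struct rte_eth_rss_conf rss_conf = {
	.rss_key = NULL,	/* keep the driver's default key */
	.rss_hf = RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV6_TCP,
};

if (rte_eth_dev_rss_hash_update(port_id, &rss_conf) != 0)
	printf("port %u rejected the requested hash fields\n", port_id);
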
@@ -100,27 +100,27 @@ enetc_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
status = enetc_port_rd(enetc_hw, ENETC_PM0_STATUS);
if (status & ENETC_LINK_MODE)
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
else
- link.link_duplex = ETH_LINK_HALF_DUPLEX;
+ link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
if (status & ENETC_LINK_STATUS)
- link.link_status = ETH_LINK_UP;
+ link.link_status = RTE_ETH_LINK_UP;
else
- link.link_status = ETH_LINK_DOWN;
+ link.link_status = RTE_ETH_LINK_DOWN;
switch (status & ENETC_LINK_SPEED_MASK) {
case ENETC_LINK_SPEED_1G:
- link.link_speed = ETH_SPEED_NUM_1G;
+ link.link_speed = RTE_ETH_SPEED_NUM_1G;
break;
case ENETC_LINK_SPEED_100M:
- link.link_speed = ETH_SPEED_NUM_100M;
+ link.link_speed = RTE_ETH_SPEED_NUM_100M;
break;
default:
case ENETC_LINK_SPEED_10M:
- link.link_speed = ETH_SPEED_NUM_10M;
+ link.link_speed = RTE_ETH_SPEED_NUM_10M;
}
return rte_eth_linkstatus_set(dev, &link);
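
Applications read these renamed link constants back through rte_eth_link_get_nowait(); a minimal sketch:

struct rte_eth_link link;

if (rte_eth_link_get_nowait(port_id, &link) == 0 &&
    link.link_status == RTE_ETH_LINK_UP)
	printf("port %u: %u Mbps, %s duplex\n", port_id,
	       link.link_speed,
	       link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
	       "full" : "half");
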
@@ -207,10 +207,10 @@ enetc_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
dev_info->max_tx_queues = MAX_TX_RINGS;
dev_info->max_rx_pktlen = ENETC_MAC_MAXFRM_SIZE;
dev_info->rx_offload_capa =
- (DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_KEEP_CRC);
+ (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_KEEP_CRC);
return 0;
}
@@ -463,7 +463,7 @@ enetc_rx_queue_setup(struct rte_eth_dev *dev,
RTE_ETH_QUEUE_STATE_STOPPED;
}
- rx_ring->crc_len = (uint8_t)((rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) ?
+ rx_ring->crc_len = (uint8_t)((rx_offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) ?
RTE_ETHER_CRC_LEN : 0);
return 0;
@@ -705,7 +705,7 @@ enetc_dev_configure(struct rte_eth_dev *dev)
enetc_port_wr(enetc_hw, ENETC_PTCMSDUR(0), ENETC_MAC_MAXFRM_SIZE);
enetc_port_wr(enetc_hw, ENETC_PTXMBAR, 2 * ENETC_MAC_MAXFRM_SIZE);
- if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+ if (rx_offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
int config;
config = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
@@ -713,10 +713,10 @@ enetc_dev_configure(struct rte_eth_dev *dev)
enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG, config);
}
- if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
+ if (rx_offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM)
checksum &= ~L3_CKSUM;
- if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM))
+ if (rx_offloads & (RTE_ETH_RX_OFFLOAD_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_TCP_CKSUM))
checksum &= ~L4_CKSUM;
enetc_port_wr(enetc_hw, ENETC_PAR_PORT_CFG, checksum);
@@ -178,7 +178,7 @@ struct enic {
*/
uint8_t rss_hash_type; /* NIC_CFG_RSS_HASH_TYPE flags */
uint8_t rss_enable;
- uint64_t rss_hf; /* ETH_RSS flags */
+ uint64_t rss_hf; /* RTE_ETH_RSS flags */
union vnic_rss_key rss_key;
union vnic_rss_cpu rss_cpu;
@@ -38,30 +38,30 @@ static const struct vic_speed_capa {
uint16_t sub_devid;
uint32_t capa;
} vic_speed_capa_map[] = {
- { 0x0043, ETH_LINK_SPEED_10G }, /* VIC */
- { 0x0047, ETH_LINK_SPEED_10G }, /* P81E PCIe */
- { 0x0048, ETH_LINK_SPEED_10G }, /* M81KR Mezz */
- { 0x004f, ETH_LINK_SPEED_10G }, /* 1280 Mezz */
- { 0x0084, ETH_LINK_SPEED_10G }, /* 1240 MLOM */
- { 0x0085, ETH_LINK_SPEED_10G }, /* 1225 PCIe */
- { 0x00cd, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1285 PCIe */
- { 0x00ce, ETH_LINK_SPEED_10G }, /* 1225T PCIe */
- { 0x012a, ETH_LINK_SPEED_40G }, /* M4308 */
- { 0x012c, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1340 MLOM */
- { 0x012e, ETH_LINK_SPEED_10G }, /* 1227 PCIe */
- { 0x0137, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1380 Mezz */
- { 0x014d, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1385 PCIe */
- { 0x015d, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1387 MLOM */
- { 0x0215, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G |
- ETH_LINK_SPEED_40G }, /* 1440 Mezz */
- { 0x0216, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G |
- ETH_LINK_SPEED_40G }, /* 1480 MLOM */
- { 0x0217, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G }, /* 1455 PCIe */
- { 0x0218, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G }, /* 1457 MLOM */
- { 0x0219, ETH_LINK_SPEED_40G }, /* 1485 PCIe */
- { 0x021a, ETH_LINK_SPEED_40G }, /* 1487 MLOM */
- { 0x024a, ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G }, /* 1495 PCIe */
- { 0x024b, ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G }, /* 1497 MLOM */
+ { 0x0043, RTE_ETH_LINK_SPEED_10G }, /* VIC */
+ { 0x0047, RTE_ETH_LINK_SPEED_10G }, /* P81E PCIe */
+ { 0x0048, RTE_ETH_LINK_SPEED_10G }, /* M81KR Mezz */
+ { 0x004f, RTE_ETH_LINK_SPEED_10G }, /* 1280 Mezz */
+ { 0x0084, RTE_ETH_LINK_SPEED_10G }, /* 1240 MLOM */
+ { 0x0085, RTE_ETH_LINK_SPEED_10G }, /* 1225 PCIe */
+ { 0x00cd, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_40G }, /* 1285 PCIe */
+ { 0x00ce, RTE_ETH_LINK_SPEED_10G }, /* 1225T PCIe */
+ { 0x012a, RTE_ETH_LINK_SPEED_40G }, /* M4308 */
+ { 0x012c, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_40G }, /* 1340 MLOM */
+ { 0x012e, RTE_ETH_LINK_SPEED_10G }, /* 1227 PCIe */
+ { 0x0137, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_40G }, /* 1380 Mezz */
+ { 0x014d, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_40G }, /* 1385 PCIe */
+ { 0x015d, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_40G }, /* 1387 MLOM */
+ { 0x0215, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G |
+ RTE_ETH_LINK_SPEED_40G }, /* 1440 Mezz */
+ { 0x0216, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G |
+ RTE_ETH_LINK_SPEED_40G }, /* 1480 MLOM */
+ { 0x0217, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G }, /* 1455 PCIe */
+ { 0x0218, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G }, /* 1457 MLOM */
+ { 0x0219, RTE_ETH_LINK_SPEED_40G }, /* 1485 PCIe */
+ { 0x021a, RTE_ETH_LINK_SPEED_40G }, /* 1487 MLOM */
+ { 0x024a, RTE_ETH_LINK_SPEED_40G | RTE_ETH_LINK_SPEED_100G }, /* 1495 PCIe */
+ { 0x024b, RTE_ETH_LINK_SPEED_40G | RTE_ETH_LINK_SPEED_100G }, /* 1497 MLOM */
{ 0, 0 }, /* End marker */
};
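
Note the distinction the table relies on: RTE_ETH_LINK_SPEED_* values are capability bits meant to be OR-ed into speed_capa, while RTE_ETH_SPEED_NUM_* are plain Mbps numbers reported in link_speed. A short sketch:

uint32_t capa = RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_40G;

if (capa & RTE_ETH_LINK_SPEED_40G)
	printf("40G capable; link_speed would then read %u\n",
	       RTE_ETH_SPEED_NUM_40G);
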
@@ -297,8 +297,8 @@ static int enicpmd_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
ENICPMD_FUNC_TRACE();
offloads = eth_dev->data->dev_conf.rxmode.offloads;
- if (mask & ETH_VLAN_STRIP_MASK) {
- if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+ if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
enic->ig_vlan_strip_en = 1;
else
enic->ig_vlan_strip_en = 0;
@@ -323,17 +323,17 @@ static int enicpmd_dev_configure(struct rte_eth_dev *eth_dev)
return ret;
}
- if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
+ if (eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
eth_dev->data->dev_conf.rxmode.offloads |=
- DEV_RX_OFFLOAD_RSS_HASH;
+ RTE_ETH_RX_OFFLOAD_RSS_HASH;
enic->mc_count = 0;
enic->hw_ip_checksum = !!(eth_dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_CHECKSUM);
+ RTE_ETH_RX_OFFLOAD_CHECKSUM);
/* All vlan offload masks to apply the current settings */
- mask = ETH_VLAN_STRIP_MASK |
- ETH_VLAN_FILTER_MASK |
- ETH_VLAN_EXTEND_MASK;
+ mask = RTE_ETH_VLAN_STRIP_MASK |
+ RTE_ETH_VLAN_FILTER_MASK |
+ RTE_ETH_VLAN_EXTEND_MASK;
ret = enicpmd_vlan_offload_set(eth_dev, mask);
if (ret) {
dev_err(enic, "Failed to configure VLAN offloads\n");
@@ -435,14 +435,14 @@ static uint32_t speed_capa_from_pci_id(struct rte_eth_dev *eth_dev)
}
/* 1300 and later models are at least 40G */
if (id >= 0x0100)
- return ETH_LINK_SPEED_40G;
+ return RTE_ETH_LINK_SPEED_40G;
/* VFs have subsystem id 0, check device id */
if (id == 0) {
/* Newer VF implies at least 40G model */
if (pdev->id.device_id == PCI_DEVICE_ID_CISCO_VIC_ENET_SN)
- return ETH_LINK_SPEED_40G;
+ return RTE_ETH_LINK_SPEED_40G;
}
- return ETH_LINK_SPEED_10G;
+ return RTE_ETH_LINK_SPEED_10G;
}
static int enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
@@ -774,8 +774,8 @@ static int enicpmd_dev_rss_reta_query(struct rte_eth_dev *dev,
}
for (i = 0; i < reta_size; i++) {
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
if (reta_conf[idx].mask & (1ULL << shift))
reta_conf[idx].reta[shift] = enic_sop_rq_idx_to_rte_idx(
enic->rss_cpu.cpu[i / 4].b[i % 4]);
@@ -806,8 +806,8 @@ static int enicpmd_dev_rss_reta_update(struct rte_eth_dev *dev,
*/
rss_cpu = enic->rss_cpu;
for (i = 0; i < reta_size; i++) {
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
if (reta_conf[idx].mask & (1ULL << shift))
rss_cpu.cpu[i / 4].b[i % 4] =
enic_rte_rq_idx_to_sop_idx(
@@ -883,7 +883,7 @@ static void enicpmd_dev_rxq_info_get(struct rte_eth_dev *dev,
*/
conf->offloads = enic->rx_offload_capa;
if (!enic->ig_vlan_strip_en)
- conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+ conf->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
/* rx_thresh and other fields are not applicable for enic */
}
@@ -969,8 +969,8 @@ static int enicpmd_dev_rx_queue_intr_disable(struct rte_eth_dev *eth_dev,
static int udp_tunnel_common_check(struct enic *enic,
struct rte_eth_udp_tunnel *tnl)
{
- if (tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN &&
- tnl->prot_type != RTE_TUNNEL_TYPE_GENEVE)
+ if (tnl->prot_type != RTE_ETH_TUNNEL_TYPE_VXLAN &&
+ tnl->prot_type != RTE_ETH_TUNNEL_TYPE_GENEVE)
return -ENOTSUP;
if (!enic->overlay_offload) {
ENICPMD_LOG(DEBUG, " overlay offload is not supported\n");
@@ -1010,7 +1010,7 @@ static int enicpmd_dev_udp_tunnel_port_add(struct rte_eth_dev *eth_dev,
ret = udp_tunnel_common_check(enic, tnl);
if (ret)
return ret;
- vxlan = (tnl->prot_type == RTE_TUNNEL_TYPE_VXLAN);
+ vxlan = (tnl->prot_type == RTE_ETH_TUNNEL_TYPE_VXLAN);
if (vxlan)
port = enic->vxlan_port;
else
@@ -1039,7 +1039,7 @@ static int enicpmd_dev_udp_tunnel_port_del(struct rte_eth_dev *eth_dev,
ret = udp_tunnel_common_check(enic, tnl);
if (ret)
return ret;
- vxlan = (tnl->prot_type == RTE_TUNNEL_TYPE_VXLAN);
+ vxlan = (tnl->prot_type == RTE_ETH_TUNNEL_TYPE_VXLAN);
if (vxlan)
port = enic->vxlan_port;
else
@@ -430,7 +430,7 @@ int enic_link_update(struct rte_eth_dev *eth_dev)
memset(&link, 0, sizeof(link));
link.link_status = enic_get_link_status(enic);
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
link.link_speed = vnic_dev_port_speed(enic->vdev);
return rte_eth_linkstatus_set(eth_dev, &link);
@@ -597,7 +597,7 @@ int enic_enable(struct enic *enic)
}
eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
- eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ eth_dev->data->dev_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
/* vnic notification of link status has already been turned on in
* enic_dev_init() which is called during probe time. Here we are
@@ -638,11 +638,11 @@ int enic_enable(struct enic *enic)
* and vlan insertion are supported.
*/
simple_tx_offloads = enic->tx_offload_capa &
- (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM);
+ (RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM);
if ((eth_dev->data->dev_conf.txmode.offloads &
~simple_tx_offloads) == 0) {
ENICPMD_LOG(DEBUG, " use the simple tx handler");
@@ -858,7 +858,7 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
max_rx_pktlen = enic_mtu_to_max_rx_pktlen(enic->rte_dev->data->mtu);
if (enic->rte_dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_SCATTER) {
+ RTE_ETH_RX_OFFLOAD_SCATTER) {
dev_info(enic, "Rq %u Scatter rx mode enabled\n", queue_idx);
/* ceil((max pkt len)/mbuf_size) */
mbufs_per_pkt = (max_rx_pktlen + mbuf_size - 1) / mbuf_size;
@@ -1385,15 +1385,15 @@ int enic_set_rss_conf(struct enic *enic, struct rte_eth_rss_conf *rss_conf)
rss_hash_type = 0;
rss_hf = rss_conf->rss_hf & enic->flow_type_rss_offloads;
if (enic->rq_count > 1 &&
- (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) &&
+ (eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) &&
rss_hf != 0) {
rss_enable = 1;
- if (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
- ETH_RSS_NONFRAG_IPV4_OTHER))
+ if (rss_hf & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
+ RTE_ETH_RSS_NONFRAG_IPV4_OTHER))
rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_IPV4;
- if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV4;
- if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) {
rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_UDP_IPV4;
if (enic->udp_rss_weak) {
/*
@@ -1404,12 +1404,12 @@ int enic_set_rss_conf(struct enic *enic, struct rte_eth_rss_conf *rss_conf)
rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV4;
}
}
- if (rss_hf & (ETH_RSS_IPV6 | ETH_RSS_IPV6_EX |
- ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER))
+ if (rss_hf & (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_IPV6_EX |
+ RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_OTHER))
rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_IPV6;
- if (rss_hf & (ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX))
+ if (rss_hf & (RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_IPV6_TCP_EX))
rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
- if (rss_hf & (ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_IPV6_UDP_EX)) {
+ if (rss_hf & (RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_IPV6_UDP_EX)) {
rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_UDP_IPV6;
if (enic->udp_rss_weak)
rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
@@ -1745,9 +1745,9 @@ enic_enable_overlay_offload(struct enic *enic)
return -EINVAL;
}
enic->tx_offload_capa |=
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
- (enic->geneve ? DEV_TX_OFFLOAD_GENEVE_TNL_TSO : 0) |
- (enic->vxlan ? DEV_TX_OFFLOAD_VXLAN_TNL_TSO : 0);
+ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ (enic->geneve ? RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO : 0) |
+ (enic->vxlan ? RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO : 0);
enic->tx_offload_mask |=
PKT_TX_OUTER_IPV6 |
PKT_TX_OUTER_IPV4 |
@@ -147,31 +147,31 @@ int enic_get_vnic_config(struct enic *enic)
* IPV4 hash type handles both non-frag and frag packet types.
* TCP/UDP is controlled via a separate flag below.
*/
- enic->flow_type_rss_offloads |= ETH_RSS_IPV4 |
- ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_OTHER;
+ enic->flow_type_rss_offloads |= RTE_ETH_RSS_IPV4 |
+ RTE_ETH_RSS_FRAG_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_OTHER;
if (ENIC_SETTING(enic, RSSHASH_TCPIPV4))
- enic->flow_type_rss_offloads |= ETH_RSS_NONFRAG_IPV4_TCP;
+ enic->flow_type_rss_offloads |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
if (ENIC_SETTING(enic, RSSHASH_IPV6))
/*
* The VIC adapter can perform RSS on IPv6 packets with and
* without extension headers. An IPv6 "fragment" is an IPv6
* packet with the fragment extension header.
*/
- enic->flow_type_rss_offloads |= ETH_RSS_IPV6 |
- ETH_RSS_IPV6_EX | ETH_RSS_FRAG_IPV6 |
- ETH_RSS_NONFRAG_IPV6_OTHER;
+ enic->flow_type_rss_offloads |= RTE_ETH_RSS_IPV6 |
+ RTE_ETH_RSS_IPV6_EX | RTE_ETH_RSS_FRAG_IPV6 |
+ RTE_ETH_RSS_NONFRAG_IPV6_OTHER;
if (ENIC_SETTING(enic, RSSHASH_TCPIPV6))
- enic->flow_type_rss_offloads |= ETH_RSS_NONFRAG_IPV6_TCP |
- ETH_RSS_IPV6_TCP_EX;
+ enic->flow_type_rss_offloads |= RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+ RTE_ETH_RSS_IPV6_TCP_EX;
if (enic->udp_rss_weak)
enic->flow_type_rss_offloads |=
- ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP |
- ETH_RSS_IPV6_UDP_EX;
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+ RTE_ETH_RSS_IPV6_UDP_EX;
if (ENIC_SETTING(enic, RSSHASH_UDPIPV4))
- enic->flow_type_rss_offloads |= ETH_RSS_NONFRAG_IPV4_UDP;
+ enic->flow_type_rss_offloads |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
if (ENIC_SETTING(enic, RSSHASH_UDPIPV6))
- enic->flow_type_rss_offloads |= ETH_RSS_NONFRAG_IPV6_UDP |
- ETH_RSS_IPV6_UDP_EX;
+ enic->flow_type_rss_offloads |= RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+ RTE_ETH_RSS_IPV6_UDP_EX;
/* Zero offloads if RSS is not enabled */
if (!ENIC_SETTING(enic, RSS))
@@ -201,19 +201,19 @@ int enic_get_vnic_config(struct enic *enic)
enic->tx_queue_offload_capa = 0;
enic->tx_offload_capa =
enic->tx_queue_offload_capa |
- DEV_TX_OFFLOAD_MULTI_SEGS |
- DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO;
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+ RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_TSO;
enic->rx_offload_capa =
- DEV_RX_OFFLOAD_SCATTER |
- DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_RSS_HASH;
+ RTE_ETH_RX_OFFLOAD_SCATTER |
+ RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+ RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_RSS_HASH;
enic->tx_offload_mask =
PKT_TX_IPV6 |
PKT_TX_IPV4 |
@@ -17,10 +17,10 @@
const char pmd_failsafe_driver_name[] = FAILSAFE_DRIVER_NAME;
static const struct rte_eth_link eth_link = {
- .link_speed = ETH_SPEED_NUM_10G,
- .link_duplex = ETH_LINK_FULL_DUPLEX,
- .link_status = ETH_LINK_UP,
- .link_autoneg = ETH_LINK_AUTONEG,
+ .link_speed = RTE_ETH_SPEED_NUM_10G,
+ .link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+ .link_status = RTE_ETH_LINK_UP,
+ .link_autoneg = RTE_ETH_LINK_AUTONEG,
};
static int
@@ -326,7 +326,7 @@ int failsafe_rx_intr_install_subdevice(struct sub_device *sdev)
int qid;
struct rte_eth_dev *fsdev;
struct rxq **rxq;
- const struct rte_intr_conf *const intr_conf =
+ const struct rte_eth_intr_conf *const intr_conf =
&ETH(sdev)->data->dev_conf.intr_conf;
fsdev = fs_dev(sdev);
@@ -519,7 +519,7 @@ int
failsafe_rx_intr_install(struct rte_eth_dev *dev)
{
struct fs_priv *priv = PRIV(dev);
- const struct rte_intr_conf *const intr_conf =
+ const struct rte_eth_intr_conf *const intr_conf =
&priv->data->dev_conf.intr_conf;
if (intr_conf->rxq == 0 || dev->intr_handle != NULL)
@@ -1172,51 +1172,51 @@ fs_dev_infos_get(struct rte_eth_dev *dev,
* configuring a sub-device.
*/
infos->rx_offload_capa =
- DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_TCP_LRO |
- DEV_RX_OFFLOAD_QINQ_STRIP |
- DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_MACSEC_STRIP |
- DEV_RX_OFFLOAD_HEADER_SPLIT |
- DEV_RX_OFFLOAD_VLAN_FILTER |
- DEV_RX_OFFLOAD_VLAN_EXTEND |
- DEV_RX_OFFLOAD_SCATTER |
- DEV_RX_OFFLOAD_TIMESTAMP |
- DEV_RX_OFFLOAD_SECURITY |
- DEV_RX_OFFLOAD_RSS_HASH;
+ RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+ RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_LRO |
+ RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+ RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_MACSEC_STRIP |
+ RTE_ETH_RX_OFFLOAD_HEADER_SPLIT |
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+ RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
+ RTE_ETH_RX_OFFLOAD_SCATTER |
+ RTE_ETH_RX_OFFLOAD_TIMESTAMP |
+ RTE_ETH_RX_OFFLOAD_SECURITY |
+ RTE_ETH_RX_OFFLOAD_RSS_HASH;
infos->rx_queue_offload_capa =
- DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_TCP_LRO |
- DEV_RX_OFFLOAD_QINQ_STRIP |
- DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_MACSEC_STRIP |
- DEV_RX_OFFLOAD_HEADER_SPLIT |
- DEV_RX_OFFLOAD_VLAN_FILTER |
- DEV_RX_OFFLOAD_VLAN_EXTEND |
- DEV_RX_OFFLOAD_SCATTER |
- DEV_RX_OFFLOAD_TIMESTAMP |
- DEV_RX_OFFLOAD_SECURITY |
- DEV_RX_OFFLOAD_RSS_HASH;
+ RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+ RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_LRO |
+ RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+ RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_MACSEC_STRIP |
+ RTE_ETH_RX_OFFLOAD_HEADER_SPLIT |
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+ RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
+ RTE_ETH_RX_OFFLOAD_SCATTER |
+ RTE_ETH_RX_OFFLOAD_TIMESTAMP |
+ RTE_ETH_RX_OFFLOAD_SECURITY |
+ RTE_ETH_RX_OFFLOAD_RSS_HASH;
infos->tx_offload_capa =
- DEV_TX_OFFLOAD_MULTI_SEGS |
- DEV_TX_OFFLOAD_MBUF_FAST_FREE |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO;
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+ RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE |
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_TSO;
infos->flow_type_rss_offloads =
- ETH_RSS_IP |
- ETH_RSS_UDP |
- ETH_RSS_TCP;
+ RTE_ETH_RSS_IP |
+ RTE_ETH_RSS_UDP |
+ RTE_ETH_RSS_TCP;
infos->dev_capa =
RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
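
An application negotiates against these capability fields before configuring the port; a minimal sketch using two of the renamed Rx offload bits:

struct rte_eth_dev_info dev_info;
struct rte_eth_conf conf = { 0 };

if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
	rte_exit(EXIT_FAILURE, "cannot get info for port %u\n", port_id);
if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_CHECKSUM)
	conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_CHECKSUM;
if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_SCATTER)
	conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
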
@@ -177,7 +177,7 @@ struct fm10k_rx_queue {
uint8_t drop_en;
uint8_t rx_deferred_start; /* don't start this queue in dev start. */
uint16_t rx_ftag_en; /* indicates FTAG RX supported */
- uint64_t offloads; /* offloads of DEV_RX_OFFLOAD_* */
+ uint64_t offloads; /* offloads of RTE_ETH_RX_OFFLOAD_* */
};
/*
@@ -209,7 +209,7 @@ struct fm10k_tx_queue {
uint16_t next_rs; /* Next pos to set RS flag */
uint16_t next_dd; /* Next pos to check DD flag */
volatile uint32_t *tail_ptr;
- uint64_t offloads; /* Offloads of DEV_TX_OFFLOAD_* */
+ uint64_t offloads; /* Offloads of RTE_ETH_TX_OFFLOAD_* */
uint16_t nb_desc;
uint16_t port_id;
uint8_t tx_deferred_start; /** don't start this queue in dev start. */
@@ -413,12 +413,12 @@ fm10k_check_mq_mode(struct rte_eth_dev *dev)
vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
- if (rx_mq_mode & ETH_MQ_RX_DCB_FLAG) {
+ if (rx_mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) {
PMD_INIT_LOG(ERR, "DCB mode is not supported.");
return -EINVAL;
}
- if (!(rx_mq_mode & ETH_MQ_RX_VMDQ_FLAG))
+ if (!(rx_mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG))
return 0;
if (hw->mac.type == fm10k_mac_vf) {
@@ -449,8 +449,8 @@ fm10k_dev_configure(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
- if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
- dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+ if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+ dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
/* multiple queue mode checking */
ret = fm10k_check_mq_mode(dev);
@@ -510,7 +510,7 @@ fm10k_dev_rss_configure(struct rte_eth_dev *dev)
0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
};
- if (dev_conf->rxmode.mq_mode != ETH_MQ_RX_RSS ||
+ if (dev_conf->rxmode.mq_mode != RTE_ETH_MQ_RX_RSS ||
dev_conf->rx_adv_conf.rss_conf.rss_hf == 0) {
FM10K_WRITE_REG(hw, FM10K_MRQC(0), 0);
return;
@@ -547,15 +547,15 @@ fm10k_dev_rss_configure(struct rte_eth_dev *dev)
*/
hf = dev_conf->rx_adv_conf.rss_conf.rss_hf;
mrqc = 0;
- mrqc |= (hf & ETH_RSS_IPV4) ? FM10K_MRQC_IPV4 : 0;
- mrqc |= (hf & ETH_RSS_IPV6) ? FM10K_MRQC_IPV6 : 0;
- mrqc |= (hf & ETH_RSS_IPV6_EX) ? FM10K_MRQC_IPV6 : 0;
- mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? FM10K_MRQC_TCP_IPV4 : 0;
- mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? FM10K_MRQC_TCP_IPV6 : 0;
- mrqc |= (hf & ETH_RSS_IPV6_TCP_EX) ? FM10K_MRQC_TCP_IPV6 : 0;
- mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? FM10K_MRQC_UDP_IPV4 : 0;
- mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? FM10K_MRQC_UDP_IPV6 : 0;
- mrqc |= (hf & ETH_RSS_IPV6_UDP_EX) ? FM10K_MRQC_UDP_IPV6 : 0;
+ mrqc |= (hf & RTE_ETH_RSS_IPV4) ? FM10K_MRQC_IPV4 : 0;
+ mrqc |= (hf & RTE_ETH_RSS_IPV6) ? FM10K_MRQC_IPV6 : 0;
+ mrqc |= (hf & RTE_ETH_RSS_IPV6_EX) ? FM10K_MRQC_IPV6 : 0;
+ mrqc |= (hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) ? FM10K_MRQC_TCP_IPV4 : 0;
+ mrqc |= (hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) ? FM10K_MRQC_TCP_IPV6 : 0;
+ mrqc |= (hf & RTE_ETH_RSS_IPV6_TCP_EX) ? FM10K_MRQC_TCP_IPV6 : 0;
+ mrqc |= (hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) ? FM10K_MRQC_UDP_IPV4 : 0;
+ mrqc |= (hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) ? FM10K_MRQC_UDP_IPV6 : 0;
+ mrqc |= (hf & RTE_ETH_RSS_IPV6_UDP_EX) ? FM10K_MRQC_UDP_IPV6 : 0;
if (mrqc == 0) {
PMD_INIT_LOG(ERR, "Specified RSS mode 0x%"PRIx64"is not"
@@ -602,7 +602,7 @@ fm10k_dev_mq_rx_configure(struct rte_eth_dev *dev)
if (hw->mac.type != fm10k_mac_pf)
return;
- if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
+ if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG)
nb_queue_pools = vmdq_conf->nb_queue_pools;
/* no pool number change, no need to update logic port and VLAN/MAC */
@@ -759,7 +759,7 @@ fm10k_dev_rx_init(struct rte_eth_dev *dev)
/* It adds dual VLAN length for supporting dual VLAN */
if ((dev->data->mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +
2 * FM10K_VLAN_TAG_SIZE) > buf_size ||
- rxq->offloads & DEV_RX_OFFLOAD_SCATTER) {
+ rxq->offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
uint32_t reg;
dev->data->scattered_rx = 1;
reg = FM10K_READ_REG(hw, FM10K_SRRCTL(i));
@@ -1145,7 +1145,7 @@ fm10k_dev_start(struct rte_eth_dev *dev)
}
/* Update default vlan when not in VMDQ mode */
- if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG))
+ if (!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG))
fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);
fm10k_link_update(dev, 0);
@@ -1222,11 +1222,11 @@ fm10k_link_update(struct rte_eth_dev *dev,
FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
PMD_INIT_FUNC_TRACE();
- dev->data->dev_link.link_speed = ETH_SPEED_NUM_50G;
- dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_50G;
+ dev->data->dev_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
dev->data->dev_link.link_status =
- dev_info->sm_down ? ETH_LINK_DOWN : ETH_LINK_UP;
- dev->data->dev_link.link_autoneg = ETH_LINK_FIXED;
+ dev_info->sm_down ? RTE_ETH_LINK_DOWN : RTE_ETH_LINK_UP;
+ dev->data->dev_link.link_autoneg = RTE_ETH_LINK_FIXED;
return 0;
}
@@ -1378,7 +1378,7 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev,
dev_info->max_vfs = pdev->max_vfs;
dev_info->vmdq_pool_base = 0;
dev_info->vmdq_queue_base = 0;
- dev_info->max_vmdq_pools = ETH_32_POOLS;
+ dev_info->max_vmdq_pools = RTE_ETH_32_POOLS;
dev_info->vmdq_queue_num = FM10K_MAX_QUEUES_PF;
dev_info->rx_queue_offload_capa = fm10k_get_rx_queue_offloads_capa(dev);
dev_info->rx_offload_capa = fm10k_get_rx_port_offloads_capa(dev) |
@@ -1389,15 +1389,15 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev,
dev_info->hash_key_size = FM10K_RSSRK_SIZE * sizeof(uint32_t);
dev_info->reta_size = FM10K_MAX_RSS_INDICES;
- dev_info->flow_type_rss_offloads = ETH_RSS_IPV4 |
- ETH_RSS_IPV6 |
- ETH_RSS_IPV6_EX |
- ETH_RSS_NONFRAG_IPV4_TCP |
- ETH_RSS_NONFRAG_IPV6_TCP |
- ETH_RSS_IPV6_TCP_EX |
- ETH_RSS_NONFRAG_IPV4_UDP |
- ETH_RSS_NONFRAG_IPV6_UDP |
- ETH_RSS_IPV6_UDP_EX;
+ dev_info->flow_type_rss_offloads = RTE_ETH_RSS_IPV4 |
+ RTE_ETH_RSS_IPV6 |
+ RTE_ETH_RSS_IPV6_EX |
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+ RTE_ETH_RSS_IPV6_TCP_EX |
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+ RTE_ETH_RSS_IPV6_UDP_EX;
dev_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_thresh = {
@@ -1435,9 +1435,9 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev,
.nb_mtu_seg_max = FM10K_TX_MAX_MTU_SEG,
};
- dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G |
- ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G |
- ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G;
+ dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_2_5G |
+ RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G |
+ RTE_ETH_LINK_SPEED_40G | RTE_ETH_LINK_SPEED_100G;
return 0;
}
@@ -1509,7 +1509,7 @@ fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
return -EINVAL;
}
- if (vlan_id > ETH_VLAN_ID_MAX) {
+ if (vlan_id > RTE_ETH_VLAN_ID_MAX) {
PMD_INIT_LOG(ERR, "Invalid vlan_id: must be < 4096");
return -EINVAL;
}
@@ -1767,20 +1767,20 @@ static uint64_t fm10k_get_rx_queue_offloads_capa(struct rte_eth_dev *dev)
{
RTE_SET_USED(dev);
- return (uint64_t)(DEV_RX_OFFLOAD_SCATTER);
+ return (uint64_t)(RTE_ETH_RX_OFFLOAD_SCATTER);
}
static uint64_t fm10k_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
{
RTE_SET_USED(dev);
- return (uint64_t)(DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_VLAN_FILTER |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_HEADER_SPLIT |
- DEV_RX_OFFLOAD_RSS_HASH);
+ return (uint64_t)(RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+ RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_HEADER_SPLIT |
+ RTE_ETH_RX_OFFLOAD_RSS_HASH);
}
static int
@@ -1965,12 +1965,12 @@ static uint64_t fm10k_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
{
RTE_SET_USED(dev);
- return (uint64_t)(DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_MULTI_SEGS |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO);
+ return (uint64_t)(RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_TSO);
}
static int
@@ -2111,8 +2111,8 @@ fm10k_reta_update(struct rte_eth_dev *dev,
* 128-entries in 32 registers
*/
for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
mask = (uint8_t)((reta_conf[idx].mask >> shift) &
BIT_MASK_PER_UINT32);
if (mask == 0)
@@ -2160,8 +2160,8 @@ fm10k_reta_query(struct rte_eth_dev *dev,
* 128-entries in 32 registers
*/
for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
mask = (uint8_t)((reta_conf[idx].mask >> shift) &
BIT_MASK_PER_UINT32);
if (mask == 0)
@@ -2198,15 +2198,15 @@ fm10k_rss_hash_update(struct rte_eth_dev *dev,
return -EINVAL;
mrqc = 0;
- mrqc |= (hf & ETH_RSS_IPV4) ? FM10K_MRQC_IPV4 : 0;
- mrqc |= (hf & ETH_RSS_IPV6) ? FM10K_MRQC_IPV6 : 0;
- mrqc |= (hf & ETH_RSS_IPV6_EX) ? FM10K_MRQC_IPV6 : 0;
- mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? FM10K_MRQC_TCP_IPV4 : 0;
- mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? FM10K_MRQC_TCP_IPV6 : 0;
- mrqc |= (hf & ETH_RSS_IPV6_TCP_EX) ? FM10K_MRQC_TCP_IPV6 : 0;
- mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? FM10K_MRQC_UDP_IPV4 : 0;
- mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? FM10K_MRQC_UDP_IPV6 : 0;
- mrqc |= (hf & ETH_RSS_IPV6_UDP_EX) ? FM10K_MRQC_UDP_IPV6 : 0;
+ mrqc |= (hf & RTE_ETH_RSS_IPV4) ? FM10K_MRQC_IPV4 : 0;
+ mrqc |= (hf & RTE_ETH_RSS_IPV6) ? FM10K_MRQC_IPV6 : 0;
+ mrqc |= (hf & RTE_ETH_RSS_IPV6_EX) ? FM10K_MRQC_IPV6 : 0;
+ mrqc |= (hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) ? FM10K_MRQC_TCP_IPV4 : 0;
+ mrqc |= (hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) ? FM10K_MRQC_TCP_IPV6 : 0;
+ mrqc |= (hf & RTE_ETH_RSS_IPV6_TCP_EX) ? FM10K_MRQC_TCP_IPV6 : 0;
+ mrqc |= (hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) ? FM10K_MRQC_UDP_IPV4 : 0;
+ mrqc |= (hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) ? FM10K_MRQC_UDP_IPV6 : 0;
+ mrqc |= (hf & RTE_ETH_RSS_IPV6_UDP_EX) ? FM10K_MRQC_UDP_IPV6 : 0;
/* If the mapping doesn't fit any supported, return */
if (mrqc == 0)
@@ -2243,15 +2243,15 @@ fm10k_rss_hash_conf_get(struct rte_eth_dev *dev,
mrqc = FM10K_READ_REG(hw, FM10K_MRQC(0));
hf = 0;
- hf |= (mrqc & FM10K_MRQC_IPV4) ? ETH_RSS_IPV4 : 0;
- hf |= (mrqc & FM10K_MRQC_IPV6) ? ETH_RSS_IPV6 : 0;
- hf |= (mrqc & FM10K_MRQC_IPV6) ? ETH_RSS_IPV6_EX : 0;
- hf |= (mrqc & FM10K_MRQC_TCP_IPV4) ? ETH_RSS_NONFRAG_IPV4_TCP : 0;
- hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_NONFRAG_IPV6_TCP : 0;
- hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_IPV6_TCP_EX : 0;
- hf |= (mrqc & FM10K_MRQC_UDP_IPV4) ? ETH_RSS_NONFRAG_IPV4_UDP : 0;
- hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_NONFRAG_IPV6_UDP : 0;
- hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_IPV6_UDP_EX : 0;
+ hf |= (mrqc & FM10K_MRQC_IPV4) ? RTE_ETH_RSS_IPV4 : 0;
+ hf |= (mrqc & FM10K_MRQC_IPV6) ? RTE_ETH_RSS_IPV6 : 0;
+ hf |= (mrqc & FM10K_MRQC_IPV6) ? RTE_ETH_RSS_IPV6_EX : 0;
+ hf |= (mrqc & FM10K_MRQC_TCP_IPV4) ? RTE_ETH_RSS_NONFRAG_IPV4_TCP : 0;
+ hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? RTE_ETH_RSS_NONFRAG_IPV6_TCP : 0;
+ hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? RTE_ETH_RSS_IPV6_TCP_EX : 0;
+ hf |= (mrqc & FM10K_MRQC_UDP_IPV4) ? RTE_ETH_RSS_NONFRAG_IPV4_UDP : 0;
+ hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? RTE_ETH_RSS_NONFRAG_IPV6_UDP : 0;
+ hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? RTE_ETH_RSS_IPV6_UDP_EX : 0;
rss_conf->rss_hf = hf;
@@ -2606,7 +2606,7 @@ fm10k_dev_interrupt_handler_pf(void *param)
/* first clear the internal SW recording structure */
if (!(dev->data->dev_conf.rxmode.mq_mode &
- ETH_MQ_RX_VMDQ_FLAG))
+ RTE_ETH_MQ_RX_VMDQ_FLAG))
fm10k_vlan_filter_set(dev, hw->mac.default_vid,
false);
@@ -2622,7 +2622,7 @@ fm10k_dev_interrupt_handler_pf(void *param)
MAIN_VSI_POOL_NUMBER);
if (!(dev->data->dev_conf.rxmode.mq_mode &
- ETH_MQ_RX_VMDQ_FLAG))
+ RTE_ETH_MQ_RX_VMDQ_FLAG))
fm10k_vlan_filter_set(dev, hw->mac.default_vid,
true);
@@ -208,11 +208,11 @@ fm10k_rx_vec_condition_check(struct rte_eth_dev *dev)
{
#ifndef RTE_LIBRTE_IEEE1588
struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
- struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
+ struct rte_eth_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
#ifndef RTE_FM10K_RX_OLFLAGS_ENABLE
/* without rx ol_flags, no VP flag report */
- if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
return -1;
#endif
@@ -221,7 +221,7 @@ fm10k_rx_vec_condition_check(struct rte_eth_dev *dev)
return -1;
/* no header split support */
- if (rxmode->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT)
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_HEADER_SPLIT)
return -1;
return 0;
@@ -1320,28 +1320,28 @@ hinic_cable_status_event(u8 cmd, void *buf_in, __rte_unused u16 in_size,
static int hinic_link_event_process(struct hinic_hwdev *hwdev,
struct rte_eth_dev *eth_dev, u8 status)
{
- uint32_t port_speed[LINK_SPEED_MAX] = {ETH_SPEED_NUM_10M,
- ETH_SPEED_NUM_100M, ETH_SPEED_NUM_1G,
- ETH_SPEED_NUM_10G, ETH_SPEED_NUM_25G,
- ETH_SPEED_NUM_40G, ETH_SPEED_NUM_100G};
+ uint32_t port_speed[LINK_SPEED_MAX] = {RTE_ETH_SPEED_NUM_10M,
+ RTE_ETH_SPEED_NUM_100M, RTE_ETH_SPEED_NUM_1G,
+ RTE_ETH_SPEED_NUM_10G, RTE_ETH_SPEED_NUM_25G,
+ RTE_ETH_SPEED_NUM_40G, RTE_ETH_SPEED_NUM_100G};
struct nic_port_info port_info;
struct rte_eth_link link;
int rc = HINIC_OK;
if (!status) {
- link.link_status = ETH_LINK_DOWN;
+ link.link_status = RTE_ETH_LINK_DOWN;
link.link_speed = 0;
- link.link_duplex = ETH_LINK_HALF_DUPLEX;
- link.link_autoneg = ETH_LINK_FIXED;
+ link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+ link.link_autoneg = RTE_ETH_LINK_FIXED;
} else {
- link.link_status = ETH_LINK_UP;
+ link.link_status = RTE_ETH_LINK_UP;
memset(&port_info, 0, sizeof(port_info));
rc = hinic_get_port_info(hwdev, &port_info);
if (rc) {
- link.link_speed = ETH_SPEED_NUM_NONE;
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
- link.link_autoneg = ETH_LINK_FIXED;
+ link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+ link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+ link.link_autoneg = RTE_ETH_LINK_FIXED;
} else {
link.link_speed = port_speed[port_info.speed %
LINK_SPEED_MAX];
@@ -311,8 +311,8 @@ static int hinic_dev_configure(struct rte_eth_dev *dev)
return -EINVAL;
}
- if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
- dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+ if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+ dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
/* mtu size is 256~9600 */
if (HINIC_MTU_TO_PKTLEN(dev->data->dev_conf.rxmode.mtu) <
@@ -338,7 +338,7 @@ static int hinic_dev_configure(struct rte_eth_dev *dev)
/* init vlan offload */
err = hinic_vlan_offload_set(dev,
- ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK);
+ RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK);
if (err) {
PMD_DRV_LOG(ERR, "Initialize vlan filter and strip failed");
(void)hinic_config_mq_mode(dev, FALSE);
@@ -696,15 +696,15 @@ static void hinic_get_speed_capa(struct rte_eth_dev *dev, uint32_t *speed_capa)
} else {
*speed_capa = 0;
if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_1G))
- *speed_capa |= ETH_LINK_SPEED_1G;
+ *speed_capa |= RTE_ETH_LINK_SPEED_1G;
if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_10G))
- *speed_capa |= ETH_LINK_SPEED_10G;
+ *speed_capa |= RTE_ETH_LINK_SPEED_10G;
if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_25G))
- *speed_capa |= ETH_LINK_SPEED_25G;
+ *speed_capa |= RTE_ETH_LINK_SPEED_25G;
if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_40G))
- *speed_capa |= ETH_LINK_SPEED_40G;
+ *speed_capa |= RTE_ETH_LINK_SPEED_40G;
if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_100G))
- *speed_capa |= ETH_LINK_SPEED_100G;
+ *speed_capa |= RTE_ETH_LINK_SPEED_100G;
}
}
@@ -732,24 +732,24 @@ hinic_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
hinic_get_speed_capa(dev, &info->speed_capa);
info->rx_queue_offload_capa = 0;
- info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_VLAN_FILTER |
- DEV_RX_OFFLOAD_SCATTER |
- DEV_RX_OFFLOAD_TCP_LRO |
- DEV_RX_OFFLOAD_RSS_HASH;
+ info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+ RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+ RTE_ETH_RX_OFFLOAD_SCATTER |
+ RTE_ETH_RX_OFFLOAD_TCP_LRO |
+ RTE_ETH_RX_OFFLOAD_RSS_HASH;
info->tx_queue_offload_capa = 0;
- info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_SCTP_CKSUM |
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO |
- DEV_TX_OFFLOAD_MULTI_SEGS;
+ info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_TSO |
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
info->hash_key_size = HINIC_RSS_KEY_SIZE;
info->reta_size = HINIC_RSS_INDIR_SIZE;
@@ -846,20 +846,20 @@ static int hinic_priv_get_dev_link_status(struct hinic_nic_dev *nic_dev,
u8 port_link_status = 0;
struct nic_port_info port_link_info;
struct hinic_hwdev *nic_hwdev = nic_dev->hwdev;
- uint32_t port_speed[LINK_SPEED_MAX] = {ETH_SPEED_NUM_10M,
- ETH_SPEED_NUM_100M, ETH_SPEED_NUM_1G,
- ETH_SPEED_NUM_10G, ETH_SPEED_NUM_25G,
- ETH_SPEED_NUM_40G, ETH_SPEED_NUM_100G};
+ uint32_t port_speed[LINK_SPEED_MAX] = {RTE_ETH_SPEED_NUM_10M,
+ RTE_ETH_SPEED_NUM_100M, RTE_ETH_SPEED_NUM_1G,
+ RTE_ETH_SPEED_NUM_10G, RTE_ETH_SPEED_NUM_25G,
+ RTE_ETH_SPEED_NUM_40G, RTE_ETH_SPEED_NUM_100G};
rc = hinic_get_link_status(nic_hwdev, &port_link_status);
if (rc)
return rc;
if (!port_link_status) {
- link->link_status = ETH_LINK_DOWN;
+ link->link_status = RTE_ETH_LINK_DOWN;
link->link_speed = 0;
- link->link_duplex = ETH_LINK_HALF_DUPLEX;
- link->link_autoneg = ETH_LINK_FIXED;
+ link->link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+ link->link_autoneg = RTE_ETH_LINK_FIXED;
return HINIC_OK;
}
@@ -901,8 +901,8 @@ static int hinic_link_update(struct rte_eth_dev *dev, int wait_to_complete)
/* Get link status information from hardware */
rc = hinic_priv_get_dev_link_status(nic_dev, &link);
if (rc != HINIC_OK) {
- link.link_speed = ETH_SPEED_NUM_NONE;
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+ link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
PMD_DRV_LOG(ERR, "Get link status failed");
goto out;
}
@@ -1650,8 +1650,8 @@ static int hinic_vlan_offload_set(struct rte_eth_dev *dev, int mask)
int err;
/* Enable or disable VLAN filter */
- if (mask & ETH_VLAN_FILTER_MASK) {
- on = (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) ?
+ if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+ on = (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) ?
TRUE : FALSE;
err = hinic_config_vlan_filter(nic_dev->hwdev, on);
if (err == HINIC_MGMT_CMD_UNSUPPORTED) {
@@ -1672,8 +1672,8 @@ static int hinic_vlan_offload_set(struct rte_eth_dev *dev, int mask)
}
/* Enable or disable VLAN stripping */
- if (mask & ETH_VLAN_STRIP_MASK) {
- on = (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) ?
+ if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+ on = (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) ?
TRUE : FALSE;
err = hinic_set_rx_vlan_offload(nic_dev->hwdev, on);
if (err) {
@@ -1859,13 +1859,13 @@ static int hinic_flow_ctrl_get(struct rte_eth_dev *dev,
fc_conf->autoneg = nic_pause.auto_neg;
if (nic_pause.tx_pause && nic_pause.rx_pause)
- fc_conf->mode = RTE_FC_FULL;
+ fc_conf->mode = RTE_ETH_FC_FULL;
else if (nic_pause.tx_pause)
- fc_conf->mode = RTE_FC_TX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
else if (nic_pause.rx_pause)
- fc_conf->mode = RTE_FC_RX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
else
- fc_conf->mode = RTE_FC_NONE;
+ fc_conf->mode = RTE_ETH_FC_NONE;
return 0;
}
@@ -1879,14 +1879,14 @@ static int hinic_flow_ctrl_set(struct rte_eth_dev *dev,
nic_pause.auto_neg = fc_conf->autoneg;
- if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
- (fc_conf->mode & RTE_FC_TX_PAUSE))
+ if (((fc_conf->mode & RTE_ETH_FC_FULL) == RTE_ETH_FC_FULL) ||
+ (fc_conf->mode & RTE_ETH_FC_TX_PAUSE))
nic_pause.tx_pause = true;
else
nic_pause.tx_pause = false;
- if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
- (fc_conf->mode & RTE_FC_RX_PAUSE))
+ if (((fc_conf->mode & RTE_ETH_FC_FULL) == RTE_ETH_FC_FULL) ||
+ (fc_conf->mode & RTE_ETH_FC_RX_PAUSE))
nic_pause.rx_pause = true;
else
nic_pause.rx_pause = false;
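
The masking above relies on RTE_ETH_FC_FULL being the bitwise OR of the RX and TX pause modes (NONE=0, RX_PAUSE=1, TX_PAUSE=2, FULL=3). A minimal sketch of both directions of that mapping; the helper names are hypothetical:

#include <stdbool.h>
#include <rte_ethdev.h>

/* Sketch: derive the ethdev flow-control mode from rx/tx pause
 * flags, mirroring what hinic_flow_ctrl_get() does above. */
static enum rte_eth_fc_mode
fc_mode_from_pause(bool rx_pause, bool tx_pause)
{
        if (rx_pause && tx_pause)
                return RTE_ETH_FC_FULL;
        if (tx_pause)
                return RTE_ETH_FC_TX_PAUSE;
        if (rx_pause)
                return RTE_ETH_FC_RX_PAUSE;
        return RTE_ETH_FC_NONE;
}

/* Sketch of the inverse, as hinic_flow_ctrl_set() does above. */
static void
pause_from_fc_mode(enum rte_eth_fc_mode mode, bool *rx, bool *tx)
{
        *tx = (mode & RTE_ETH_FC_TX_PAUSE) != 0;
        *rx = (mode & RTE_ETH_FC_RX_PAUSE) != 0;
}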
@@ -1930,7 +1930,7 @@ static int hinic_rss_hash_update(struct rte_eth_dev *dev,
struct nic_rss_type rss_type = {0};
int err = 0;
- if (!(nic_dev->flags & ETH_MQ_RX_RSS_FLAG)) {
+ if (!(nic_dev->flags & RTE_ETH_MQ_RX_RSS_FLAG)) {
PMD_DRV_LOG(WARNING, "RSS is not enabled");
return HINIC_OK;
}
@@ -1951,14 +1951,14 @@ static int hinic_rss_hash_update(struct rte_eth_dev *dev,
}
}
- rss_type.ipv4 = (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4)) ? 1 : 0;
- rss_type.tcp_ipv4 = (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) ? 1 : 0;
- rss_type.ipv6 = (rss_hf & (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6)) ? 1 : 0;
- rss_type.ipv6_ext = (rss_hf & ETH_RSS_IPV6_EX) ? 1 : 0;
- rss_type.tcp_ipv6 = (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) ? 1 : 0;
- rss_type.tcp_ipv6_ext = (rss_hf & ETH_RSS_IPV6_TCP_EX) ? 1 : 0;
- rss_type.udp_ipv4 = (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) ? 1 : 0;
- rss_type.udp_ipv6 = (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) ? 1 : 0;
+ rss_type.ipv4 = (rss_hf & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4)) ? 1 : 0;
+ rss_type.tcp_ipv4 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) ? 1 : 0;
+ rss_type.ipv6 = (rss_hf & (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6)) ? 1 : 0;
+ rss_type.ipv6_ext = (rss_hf & RTE_ETH_RSS_IPV6_EX) ? 1 : 0;
+ rss_type.tcp_ipv6 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) ? 1 : 0;
+ rss_type.tcp_ipv6_ext = (rss_hf & RTE_ETH_RSS_IPV6_TCP_EX) ? 1 : 0;
+ rss_type.udp_ipv4 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) ? 1 : 0;
+ rss_type.udp_ipv6 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) ? 1 : 0;
err = hinic_set_rss_type(nic_dev->hwdev, tmpl_idx, rss_type);
if (err) {
@@ -1994,7 +1994,7 @@ static int hinic_rss_conf_get(struct rte_eth_dev *dev,
struct nic_rss_type rss_type = {0};
int err;
- if (!(nic_dev->flags & ETH_MQ_RX_RSS_FLAG)) {
+ if (!(nic_dev->flags & RTE_ETH_MQ_RX_RSS_FLAG)) {
PMD_DRV_LOG(WARNING, "RSS is not enabled");
return HINIC_ERROR;
}
@@ -2015,15 +2015,15 @@ static int hinic_rss_conf_get(struct rte_eth_dev *dev,
rss_conf->rss_hf = 0;
rss_conf->rss_hf |= rss_type.ipv4 ?
- (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4) : 0;
- rss_conf->rss_hf |= rss_type.tcp_ipv4 ? ETH_RSS_NONFRAG_IPV4_TCP : 0;
+ (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4) : 0;
+ rss_conf->rss_hf |= rss_type.tcp_ipv4 ? RTE_ETH_RSS_NONFRAG_IPV4_TCP : 0;
rss_conf->rss_hf |= rss_type.ipv6 ?
- (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6) : 0;
- rss_conf->rss_hf |= rss_type.ipv6_ext ? ETH_RSS_IPV6_EX : 0;
- rss_conf->rss_hf |= rss_type.tcp_ipv6 ? ETH_RSS_NONFRAG_IPV6_TCP : 0;
- rss_conf->rss_hf |= rss_type.tcp_ipv6_ext ? ETH_RSS_IPV6_TCP_EX : 0;
- rss_conf->rss_hf |= rss_type.udp_ipv4 ? ETH_RSS_NONFRAG_IPV4_UDP : 0;
- rss_conf->rss_hf |= rss_type.udp_ipv6 ? ETH_RSS_NONFRAG_IPV6_UDP : 0;
+ (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6) : 0;
+ rss_conf->rss_hf |= rss_type.ipv6_ext ? RTE_ETH_RSS_IPV6_EX : 0;
+ rss_conf->rss_hf |= rss_type.tcp_ipv6 ? RTE_ETH_RSS_NONFRAG_IPV6_TCP : 0;
+ rss_conf->rss_hf |= rss_type.tcp_ipv6_ext ? RTE_ETH_RSS_IPV6_TCP_EX : 0;
+ rss_conf->rss_hf |= rss_type.udp_ipv4 ? RTE_ETH_RSS_NONFRAG_IPV4_UDP : 0;
+ rss_conf->rss_hf |= rss_type.udp_ipv6 ? RTE_ETH_RSS_NONFRAG_IPV6_UDP : 0;
return HINIC_OK;
}
@@ -2053,7 +2053,7 @@ static int hinic_rss_indirtbl_update(struct rte_eth_dev *dev,
u16 i = 0;
u16 idx, shift;
- if (!(nic_dev->flags & ETH_MQ_RX_RSS_FLAG))
+ if (!(nic_dev->flags & RTE_ETH_MQ_RX_RSS_FLAG))
return HINIC_OK;
if (reta_size != NIC_RSS_INDIR_SIZE) {
@@ -2067,8 +2067,8 @@ static int hinic_rss_indirtbl_update(struct rte_eth_dev *dev,
/* update rss indir_tbl */
for (i = 0; i < reta_size; i++) {
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
if (reta_conf[idx].reta[shift] >= nic_dev->num_rq) {
PMD_DRV_LOG(ERR, "Invalid reta entry, indirtbl[%d]: %d "
@@ -2133,8 +2133,8 @@ static int hinic_rss_indirtbl_query(struct rte_eth_dev *dev,
}
for (i = 0; i < reta_size; i++) {
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
if (reta_conf[idx].mask & (1ULL << shift))
reta_conf[idx].reta[shift] = (uint16_t)indirtbl[i];
}
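
Each rte_eth_rss_reta_entry64 group covers RTE_ETH_RETA_GROUP_SIZE (64) entries, which is what the idx/shift arithmetic above computes. A minimal sketch of the application-side packing, assuming reta_size is a multiple of the group size; the helper name is hypothetical:

#include <rte_ethdev.h>

/* Sketch: scatter a flat indirection table into the 64-entry
 * groups expected by rte_eth_dev_rss_reta_update(). */
static void
fill_reta_conf(struct rte_eth_rss_reta_entry64 *reta_conf,
               const uint16_t *indir_tbl, uint16_t reta_size)
{
        uint16_t i, idx, shift;

        for (i = 0; i < reta_size; i++) {
                idx = i / RTE_ETH_RETA_GROUP_SIZE;
                shift = i % RTE_ETH_RETA_GROUP_SIZE;
                reta_conf[idx].mask |= 1ULL << shift;
                reta_conf[idx].reta[shift] = indir_tbl[i];
        }
}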
@@ -504,14 +504,14 @@ static void hinic_fill_rss_type(struct nic_rss_type *rss_type,
{
u64 rss_hf = rss_conf->rss_hf;
- rss_type->ipv4 = (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4)) ? 1 : 0;
- rss_type->tcp_ipv4 = (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) ? 1 : 0;
- rss_type->ipv6 = (rss_hf & (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6)) ? 1 : 0;
- rss_type->ipv6_ext = (rss_hf & ETH_RSS_IPV6_EX) ? 1 : 0;
- rss_type->tcp_ipv6 = (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) ? 1 : 0;
- rss_type->tcp_ipv6_ext = (rss_hf & ETH_RSS_IPV6_TCP_EX) ? 1 : 0;
- rss_type->udp_ipv4 = (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) ? 1 : 0;
- rss_type->udp_ipv6 = (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) ? 1 : 0;
+ rss_type->ipv4 = (rss_hf & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4)) ? 1 : 0;
+ rss_type->tcp_ipv4 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) ? 1 : 0;
+ rss_type->ipv6 = (rss_hf & (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6)) ? 1 : 0;
+ rss_type->ipv6_ext = (rss_hf & RTE_ETH_RSS_IPV6_EX) ? 1 : 0;
+ rss_type->tcp_ipv6 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) ? 1 : 0;
+ rss_type->tcp_ipv6_ext = (rss_hf & RTE_ETH_RSS_IPV6_TCP_EX) ? 1 : 0;
+ rss_type->udp_ipv4 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) ? 1 : 0;
+ rss_type->udp_ipv6 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) ? 1 : 0;
}
static void hinic_fillout_indir_tbl(struct hinic_nic_dev *nic_dev, u32 *indir)
@@ -588,8 +588,8 @@ static int hinic_setup_num_qps(struct hinic_nic_dev *nic_dev)
{
int err, i;
- if (!(nic_dev->flags & ETH_MQ_RX_RSS_FLAG)) {
- nic_dev->flags &= ~ETH_MQ_RX_RSS_FLAG;
+ if (!(nic_dev->flags & RTE_ETH_MQ_RX_RSS_FLAG)) {
+ nic_dev->flags &= ~RTE_ETH_MQ_RX_RSS_FLAG;
nic_dev->num_rss = 0;
if (nic_dev->num_rq > 1) {
/* get rss template id */
@@ -599,7 +599,7 @@ static int hinic_setup_num_qps(struct hinic_nic_dev *nic_dev)
PMD_DRV_LOG(WARNING, "Alloc rss template failed");
return err;
}
- nic_dev->flags |= ETH_MQ_RX_RSS_FLAG;
+ nic_dev->flags |= RTE_ETH_MQ_RX_RSS_FLAG;
for (i = 0; i < nic_dev->num_rq; i++)
hinic_add_rq_to_rx_queue_list(nic_dev, i);
}
@@ -610,12 +610,12 @@ static int hinic_setup_num_qps(struct hinic_nic_dev *nic_dev)
static void hinic_destroy_num_qps(struct hinic_nic_dev *nic_dev)
{
- if (nic_dev->flags & ETH_MQ_RX_RSS_FLAG) {
+ if (nic_dev->flags & RTE_ETH_MQ_RX_RSS_FLAG) {
if (hinic_rss_template_free(nic_dev->hwdev,
nic_dev->rss_tmpl_idx))
PMD_DRV_LOG(WARNING, "Free rss template failed");
- nic_dev->flags &= ~ETH_MQ_RX_RSS_FLAG;
+ nic_dev->flags &= ~RTE_ETH_MQ_RX_RSS_FLAG;
}
}
@@ -641,7 +641,7 @@ int hinic_config_mq_mode(struct rte_eth_dev *dev, bool on)
int ret = 0;
switch (dev_conf->rxmode.mq_mode) {
- case ETH_MQ_RX_RSS:
+ case RTE_ETH_MQ_RX_RSS:
ret = hinic_config_mq_rx_rss(nic_dev, on);
break;
default:
@@ -662,7 +662,7 @@ int hinic_rx_configure(struct rte_eth_dev *dev)
int lro_wqe_num;
int buf_size;
- if (nic_dev->flags & ETH_MQ_RX_RSS_FLAG) {
+ if (nic_dev->flags & RTE_ETH_MQ_RX_RSS_FLAG) {
if (rss_conf.rss_hf == 0) {
rss_conf.rss_hf = HINIC_RSS_OFFLOAD_ALL;
} else if ((rss_conf.rss_hf & HINIC_RSS_OFFLOAD_ALL) == 0) {
@@ -678,7 +678,7 @@ int hinic_rx_configure(struct rte_eth_dev *dev)
}
/* Enable both L3/L4 rx checksum offload */
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_CHECKSUM)
+ if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
nic_dev->rx_csum_en = HINIC_RX_CSUM_OFFLOAD_EN;
err = hinic_set_rx_csum_offload(nic_dev->hwdev,
@@ -687,7 +687,7 @@ int hinic_rx_configure(struct rte_eth_dev *dev)
goto rx_csum_ofl_err;
/* config lro */
- lro_en = dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO ?
+ lro_en = dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO ?
true : false;
max_lro_size = dev->data->dev_conf.rxmode.max_lro_pkt_size;
buf_size = nic_dev->hwdev->nic_io->rq_buf_size;
@@ -726,7 +726,7 @@ void hinic_rx_remove_configure(struct rte_eth_dev *dev)
{
struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
- if (nic_dev->flags & ETH_MQ_RX_RSS_FLAG) {
+ if (nic_dev->flags & RTE_ETH_MQ_RX_RSS_FLAG) {
hinic_rss_deinit(nic_dev);
hinic_destroy_num_qps(nic_dev);
}
@@ -8,17 +8,17 @@
#define HINIC_DEFAULT_RX_FREE_THRESH 32
#define HINIC_RSS_OFFLOAD_ALL ( \
- ETH_RSS_IPV4 | \
- ETH_RSS_FRAG_IPV4 |\
- ETH_RSS_NONFRAG_IPV4_TCP | \
- ETH_RSS_NONFRAG_IPV4_UDP | \
- ETH_RSS_IPV6 | \
- ETH_RSS_FRAG_IPV6 | \
- ETH_RSS_NONFRAG_IPV6_TCP | \
- ETH_RSS_NONFRAG_IPV6_UDP | \
- ETH_RSS_IPV6_EX | \
- ETH_RSS_IPV6_TCP_EX | \
- ETH_RSS_IPV6_UDP_EX)
+ RTE_ETH_RSS_IPV4 | \
+ RTE_ETH_RSS_FRAG_IPV4 |\
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+ RTE_ETH_RSS_IPV6 | \
+ RTE_ETH_RSS_FRAG_IPV6 | \
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+ RTE_ETH_RSS_IPV6_EX | \
+ RTE_ETH_RSS_IPV6_TCP_EX | \
+ RTE_ETH_RSS_IPV6_UDP_EX)
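
The macro above is the full set of hash types this driver can offload; hinic_rx_configure() uses it to fall back or reject requested types. A one-line sketch of that kind of check (hypothetical helper):

#include <stdint.h>

/* Sketch: use everything the device supports when the application
 * requests nothing, otherwise intersect the two sets. */
static uint64_t
clamp_rss_hf(uint64_t requested, uint64_t supported)
{
        return requested == 0 ? supported : (requested & supported);
}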
enum rq_completion_fmt {
RQ_COMPLETE_SGE = 1
@@ -1536,7 +1536,7 @@ hns3_dcb_hw_configure(struct hns3_adapter *hns)
return ret;
}
- if (hw->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
+ if (hw->data->dev_conf.dcb_capability_en & RTE_ETH_DCB_PFC_SUPPORT) {
dcb_rx_conf = &hw->data->dev_conf.rx_adv_conf.dcb_rx_conf;
if (dcb_rx_conf->nb_tcs == 0)
hw->dcb_info.pfc_en = 1; /* tc0 only */
@@ -1693,7 +1693,7 @@ hns3_update_queue_map_configure(struct hns3_adapter *hns)
uint16_t nb_tx_q = hw->data->nb_tx_queues;
int ret;
- if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG)
+ if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_DCB_FLAG)
return 0;
ret = hns3_dcb_update_tc_queue_mapping(hw, nb_rx_q, nb_tx_q);
@@ -1713,22 +1713,22 @@ static void
hns3_get_fc_mode(struct hns3_hw *hw, enum rte_eth_fc_mode mode)
{
switch (mode) {
- case RTE_FC_NONE:
+ case RTE_ETH_FC_NONE:
hw->requested_fc_mode = HNS3_FC_NONE;
break;
- case RTE_FC_RX_PAUSE:
+ case RTE_ETH_FC_RX_PAUSE:
hw->requested_fc_mode = HNS3_FC_RX_PAUSE;
break;
- case RTE_FC_TX_PAUSE:
+ case RTE_ETH_FC_TX_PAUSE:
hw->requested_fc_mode = HNS3_FC_TX_PAUSE;
break;
- case RTE_FC_FULL:
+ case RTE_ETH_FC_FULL:
hw->requested_fc_mode = HNS3_FC_FULL;
break;
default:
hw->requested_fc_mode = HNS3_FC_NONE;
hns3_warn(hw, "fc_mode(%u) exceeds member scope and is "
- "configured to RTE_FC_NONE", mode);
+ "configured to RTE_ETH_FC_NONE", mode);
break;
}
}
@@ -60,29 +60,29 @@ enum hns3_evt_cause {
};
static const struct rte_eth_fec_capa speed_fec_capa_tbl[] = {
- { ETH_SPEED_NUM_10G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+ { RTE_ETH_SPEED_NUM_10G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
RTE_ETH_FEC_MODE_CAPA_MASK(BASER) },
- { ETH_SPEED_NUM_25G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+ { RTE_ETH_SPEED_NUM_25G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
RTE_ETH_FEC_MODE_CAPA_MASK(RS) },
- { ETH_SPEED_NUM_40G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+ { RTE_ETH_SPEED_NUM_40G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
RTE_ETH_FEC_MODE_CAPA_MASK(BASER) },
- { ETH_SPEED_NUM_50G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+ { RTE_ETH_SPEED_NUM_50G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
RTE_ETH_FEC_MODE_CAPA_MASK(RS) },
- { ETH_SPEED_NUM_100G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+ { RTE_ETH_SPEED_NUM_100G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
RTE_ETH_FEC_MODE_CAPA_MASK(RS) },
- { ETH_SPEED_NUM_200G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+ { RTE_ETH_SPEED_NUM_200G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
RTE_ETH_FEC_MODE_CAPA_MASK(RS) }
};
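
The table above keys FEC capability masks by link speed; struct rte_eth_fec_capa carries a speed and a capa bitmask. A minimal lookup sketch with a hypothetical helper name:

#include <stddef.h>
#include <rte_ethdev.h>

/* Sketch: linear lookup of the FEC capability mask for a given
 * RTE_ETH_SPEED_NUM_* value; returns 0 when the speed is absent. */
static uint32_t
fec_capa_for_speed(const struct rte_eth_fec_capa *tbl, size_t n,
                   uint32_t speed)
{
        size_t i;

        for (i = 0; i < n; i++)
                if (tbl[i].speed == speed)
                        return tbl[i].capa;
        return 0;
}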
@@ -500,8 +500,8 @@ hns3_vlan_tpid_configure(struct hns3_adapter *hns, enum rte_vlan_type vlan_type,
struct hns3_cmd_desc desc;
int ret;
- if ((vlan_type != ETH_VLAN_TYPE_INNER &&
- vlan_type != ETH_VLAN_TYPE_OUTER)) {
+ if ((vlan_type != RTE_ETH_VLAN_TYPE_INNER &&
+ vlan_type != RTE_ETH_VLAN_TYPE_OUTER)) {
hns3_err(hw, "Unsupported vlan type, vlan_type =%d", vlan_type);
return -EINVAL;
}
@@ -514,10 +514,10 @@ hns3_vlan_tpid_configure(struct hns3_adapter *hns, enum rte_vlan_type vlan_type,
hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_TYPE_ID, false);
rx_req = (struct hns3_rx_vlan_type_cfg_cmd *)desc.data;
- if (vlan_type == ETH_VLAN_TYPE_OUTER) {
+ if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER) {
rx_req->ot_fst_vlan_type = rte_cpu_to_le_16(tpid);
rx_req->ot_sec_vlan_type = rte_cpu_to_le_16(tpid);
- } else if (vlan_type == ETH_VLAN_TYPE_INNER) {
+ } else if (vlan_type == RTE_ETH_VLAN_TYPE_INNER) {
rx_req->ot_fst_vlan_type = rte_cpu_to_le_16(tpid);
rx_req->ot_sec_vlan_type = rte_cpu_to_le_16(tpid);
rx_req->in_fst_vlan_type = rte_cpu_to_le_16(tpid);
@@ -725,11 +725,11 @@ hns3_vlan_offload_set(struct rte_eth_dev *dev, int mask)
rte_spinlock_lock(&hw->lock);
rxmode = &dev->data->dev_conf.rxmode;
tmp_mask = (unsigned int)mask;
- if (tmp_mask & ETH_VLAN_FILTER_MASK) {
+ if (tmp_mask & RTE_ETH_VLAN_FILTER_MASK) {
/* ignore vlan filter configuration during promiscuous mode */
if (!dev->data->promiscuous) {
/* Enable or disable VLAN filter */
- enable = rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER ?
+ enable = rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER ?
true : false;
ret = hns3_enable_vlan_filter(hns, enable);
@@ -742,9 +742,9 @@ hns3_vlan_offload_set(struct rte_eth_dev *dev, int mask)
}
}
- if (tmp_mask & ETH_VLAN_STRIP_MASK) {
+ if (tmp_mask & RTE_ETH_VLAN_STRIP_MASK) {
/* Enable or disable VLAN stripping */
- enable = rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP ?
+ enable = rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP ?
true : false;
ret = hns3_en_hw_strip_rxvtag(hns, enable);
@@ -1118,7 +1118,7 @@ hns3_init_vlan_config(struct hns3_adapter *hns)
return ret;
}
- ret = hns3_vlan_tpid_configure(hns, ETH_VLAN_TYPE_INNER,
+ ret = hns3_vlan_tpid_configure(hns, RTE_ETH_VLAN_TYPE_INNER,
RTE_ETHER_TYPE_VLAN);
if (ret) {
hns3_err(hw, "tpid set fail in pf, ret =%d", ret);
@@ -1161,7 +1161,7 @@ hns3_restore_vlan_conf(struct hns3_adapter *hns)
if (!hw->data->promiscuous) {
/* restore vlan filter states */
offloads = hw->data->dev_conf.rxmode.offloads;
- enable = offloads & DEV_RX_OFFLOAD_VLAN_FILTER ? true : false;
+ enable = offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER ? true : false;
ret = hns3_enable_vlan_filter(hns, enable);
if (ret) {
hns3_err(hw, "failed to restore vlan rx filter conf, "
@@ -1204,7 +1204,7 @@ hns3_dev_configure_vlan(struct rte_eth_dev *dev)
txmode->hw_vlan_reject_untagged);
/* Apply vlan offload setting */
- mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK;
+ mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK;
ret = hns3_vlan_offload_set(dev, mask);
if (ret) {
hns3_err(hw, "dev config rx vlan offload failed, ret = %d",
@@ -2213,9 +2213,9 @@ hns3_check_mq_mode(struct rte_eth_dev *dev)
int max_tc = 0;
int i;
- if ((rx_mq_mode & ETH_MQ_RX_VMDQ_FLAG) ||
- (tx_mq_mode == ETH_MQ_TX_VMDQ_DCB ||
- tx_mq_mode == ETH_MQ_TX_VMDQ_ONLY)) {
+ if ((rx_mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG) ||
+ (tx_mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB ||
+ tx_mq_mode == RTE_ETH_MQ_TX_VMDQ_ONLY)) {
hns3_err(hw, "VMDQ is not supported, rx_mq_mode = %d, tx_mq_mode = %d.",
rx_mq_mode, tx_mq_mode);
return -EOPNOTSUPP;
@@ -2223,7 +2223,7 @@ hns3_check_mq_mode(struct rte_eth_dev *dev)
dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
dcb_tx_conf = &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;
- if (rx_mq_mode & ETH_MQ_RX_DCB_FLAG) {
+ if (rx_mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) {
if (dcb_rx_conf->nb_tcs > pf->tc_max) {
hns3_err(hw, "nb_tcs(%u) > max_tc(%u) driver supported.",
dcb_rx_conf->nb_tcs, pf->tc_max);
@@ -2232,7 +2232,7 @@ hns3_check_mq_mode(struct rte_eth_dev *dev)
if (!(dcb_rx_conf->nb_tcs == HNS3_4_TCS ||
dcb_rx_conf->nb_tcs == HNS3_8_TCS)) {
- hns3_err(hw, "on ETH_MQ_RX_DCB_RSS mode, "
+ hns3_err(hw, "on RTE_ETH_MQ_RX_DCB_RSS mode, "
"nb_tcs(%d) != %d or %d in rx direction.",
dcb_rx_conf->nb_tcs, HNS3_4_TCS, HNS3_8_TCS);
return -EINVAL;
@@ -2400,11 +2400,11 @@ hns3_check_link_speed(struct hns3_hw *hw, uint32_t link_speeds)
* configure link_speeds (default 0), which means auto-negotiation.
* In this case, it should return success.
*/
- if (link_speeds == ETH_LINK_SPEED_AUTONEG &&
+ if (link_speeds == RTE_ETH_LINK_SPEED_AUTONEG &&
hw->mac.support_autoneg == 0)
return 0;
- if (link_speeds != ETH_LINK_SPEED_AUTONEG) {
+ if (link_speeds != RTE_ETH_LINK_SPEED_AUTONEG) {
ret = hns3_check_port_speed(hw, link_speeds);
if (ret)
return ret;
@@ -2464,15 +2464,15 @@ hns3_dev_configure(struct rte_eth_dev *dev)
if (ret)
goto cfg_err;
- if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG) {
+ if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) {
ret = hns3_setup_dcb(dev);
if (ret)
goto cfg_err;
}
/* When RSS is not configured, direct all packets to queue 0 */
- if ((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG) {
- conf->rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+ if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
+ conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
rss_conf = conf->rx_adv_conf.rss_conf;
hw->rss_dis_flag = false;
ret = hns3_dev_rss_hash_update(dev, &rss_conf);
@@ -2493,7 +2493,7 @@ hns3_dev_configure(struct rte_eth_dev *dev)
goto cfg_err;
/* config hardware GRO */
- gro_en = conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO ? true : false;
+ gro_en = conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO ? true : false;
ret = hns3_config_gro(hw, gro_en);
if (ret)
goto cfg_err;
@@ -2600,15 +2600,15 @@ hns3_get_copper_port_speed_capa(uint32_t supported_speed)
uint32_t speed_capa = 0;
if (supported_speed & HNS3_PHY_LINK_SPEED_10M_HD_BIT)
- speed_capa |= ETH_LINK_SPEED_10M_HD;
+ speed_capa |= RTE_ETH_LINK_SPEED_10M_HD;
if (supported_speed & HNS3_PHY_LINK_SPEED_10M_BIT)
- speed_capa |= ETH_LINK_SPEED_10M;
+ speed_capa |= RTE_ETH_LINK_SPEED_10M;
if (supported_speed & HNS3_PHY_LINK_SPEED_100M_HD_BIT)
- speed_capa |= ETH_LINK_SPEED_100M_HD;
+ speed_capa |= RTE_ETH_LINK_SPEED_100M_HD;
if (supported_speed & HNS3_PHY_LINK_SPEED_100M_BIT)
- speed_capa |= ETH_LINK_SPEED_100M;
+ speed_capa |= RTE_ETH_LINK_SPEED_100M;
if (supported_speed & HNS3_PHY_LINK_SPEED_1000M_BIT)
- speed_capa |= ETH_LINK_SPEED_1G;
+ speed_capa |= RTE_ETH_LINK_SPEED_1G;
return speed_capa;
}
@@ -2619,19 +2619,19 @@ hns3_get_firber_port_speed_capa(uint32_t supported_speed)
uint32_t speed_capa = 0;
if (supported_speed & HNS3_FIBER_LINK_SPEED_1G_BIT)
- speed_capa |= ETH_LINK_SPEED_1G;
+ speed_capa |= RTE_ETH_LINK_SPEED_1G;
if (supported_speed & HNS3_FIBER_LINK_SPEED_10G_BIT)
- speed_capa |= ETH_LINK_SPEED_10G;
+ speed_capa |= RTE_ETH_LINK_SPEED_10G;
if (supported_speed & HNS3_FIBER_LINK_SPEED_25G_BIT)
- speed_capa |= ETH_LINK_SPEED_25G;
+ speed_capa |= RTE_ETH_LINK_SPEED_25G;
if (supported_speed & HNS3_FIBER_LINK_SPEED_40G_BIT)
- speed_capa |= ETH_LINK_SPEED_40G;
+ speed_capa |= RTE_ETH_LINK_SPEED_40G;
if (supported_speed & HNS3_FIBER_LINK_SPEED_50G_BIT)
- speed_capa |= ETH_LINK_SPEED_50G;
+ speed_capa |= RTE_ETH_LINK_SPEED_50G;
if (supported_speed & HNS3_FIBER_LINK_SPEED_100G_BIT)
- speed_capa |= ETH_LINK_SPEED_100G;
+ speed_capa |= RTE_ETH_LINK_SPEED_100G;
if (supported_speed & HNS3_FIBER_LINK_SPEED_200G_BIT)
- speed_capa |= ETH_LINK_SPEED_200G;
+ speed_capa |= RTE_ETH_LINK_SPEED_200G;
return speed_capa;
}
@@ -2650,7 +2650,7 @@ hns3_get_speed_capa(struct hns3_hw *hw)
hns3_get_firber_port_speed_capa(mac->supported_speed);
if (mac->support_autoneg == 0)
- speed_capa |= ETH_LINK_SPEED_FIXED;
+ speed_capa |= RTE_ETH_LINK_SPEED_FIXED;
return speed_capa;
}
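
On the application side, the RTE_ETH_LINK_SPEED_FIXED bit reported here is also how a fixed speed is requested: OR it into dev_conf.link_speeds together with one speed bit before rte_eth_dev_configure(). A minimal sketch:

#include <rte_ethdev.h>

/* Sketch: request a fixed 10 Gbit/s link instead of autonegotiation
 * (RTE_ETH_LINK_SPEED_AUTONEG, i.e. 0, is the default). */
static const struct rte_eth_conf fixed_10g_conf = {
        .link_speeds = RTE_ETH_LINK_SPEED_FIXED | RTE_ETH_LINK_SPEED_10G,
};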
@@ -2676,40 +2676,40 @@ hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
info->max_mac_addrs = HNS3_UC_MACADDR_NUM;
info->max_mtu = info->max_rx_pktlen - HNS3_ETH_OVERHEAD;
info->max_lro_pkt_size = HNS3_MAX_LRO_SIZE;
- info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_SCTP_CKSUM |
- DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
- DEV_RX_OFFLOAD_KEEP_CRC |
- DEV_RX_OFFLOAD_SCATTER |
- DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_VLAN_FILTER |
- DEV_RX_OFFLOAD_RSS_HASH |
- DEV_RX_OFFLOAD_TCP_LRO);
- info->tx_offload_capa = (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_SCTP_CKSUM |
- DEV_TX_OFFLOAD_MULTI_SEGS |
- DEV_TX_OFFLOAD_TCP_TSO |
- DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
- DEV_TX_OFFLOAD_GRE_TNL_TSO |
- DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
- DEV_TX_OFFLOAD_MBUF_FAST_FREE |
+ info->rx_offload_capa = (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_KEEP_CRC |
+ RTE_ETH_RX_OFFLOAD_SCATTER |
+ RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+ RTE_ETH_RX_OFFLOAD_RSS_HASH |
+ RTE_ETH_RX_OFFLOAD_TCP_LRO);
+ info->tx_offload_capa = (RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+ RTE_ETH_TX_OFFLOAD_TCP_TSO |
+ RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE |
hns3_txvlan_cap_get(hw));
if (hns3_dev_get_support(hw, OUTER_UDP_CKSUM))
- info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
+ info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM;
if (hns3_dev_get_support(hw, INDEP_TXRX))
info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
if (hns3_dev_get_support(hw, PTP))
- info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
+ info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
info->rx_desc_lim = (struct rte_eth_desc_lim) {
.nb_max = HNS3_MAX_RING_DESC,
@@ -2793,7 +2793,7 @@ hns3_update_port_link_info(struct rte_eth_dev *eth_dev)
ret = hns3_update_link_info(eth_dev);
if (ret)
- hw->mac.link_status = ETH_LINK_DOWN;
+ hw->mac.link_status = RTE_ETH_LINK_DOWN;
return ret;
}
@@ -2806,29 +2806,29 @@ hns3_setup_linkstatus(struct rte_eth_dev *eth_dev,
struct hns3_mac *mac = &hw->mac;
switch (mac->link_speed) {
- case ETH_SPEED_NUM_10M:
- case ETH_SPEED_NUM_100M:
- case ETH_SPEED_NUM_1G:
- case ETH_SPEED_NUM_10G:
- case ETH_SPEED_NUM_25G:
- case ETH_SPEED_NUM_40G:
- case ETH_SPEED_NUM_50G:
- case ETH_SPEED_NUM_100G:
- case ETH_SPEED_NUM_200G:
+ case RTE_ETH_SPEED_NUM_10M:
+ case RTE_ETH_SPEED_NUM_100M:
+ case RTE_ETH_SPEED_NUM_1G:
+ case RTE_ETH_SPEED_NUM_10G:
+ case RTE_ETH_SPEED_NUM_25G:
+ case RTE_ETH_SPEED_NUM_40G:
+ case RTE_ETH_SPEED_NUM_50G:
+ case RTE_ETH_SPEED_NUM_100G:
+ case RTE_ETH_SPEED_NUM_200G:
if (mac->link_status)
new_link->link_speed = mac->link_speed;
break;
default:
if (mac->link_status)
- new_link->link_speed = ETH_SPEED_NUM_UNKNOWN;
+ new_link->link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
break;
}
if (!mac->link_status)
- new_link->link_speed = ETH_SPEED_NUM_NONE;
+ new_link->link_speed = RTE_ETH_SPEED_NUM_NONE;
new_link->link_duplex = mac->link_duplex;
- new_link->link_status = mac->link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
+ new_link->link_status = mac->link_status ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
new_link->link_autoneg = mac->link_autoneg;
}
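
For reference, a sketch of the application-side query that consumes the link state filled in by driver code like the function above; port_id is assumed valid:

#include <stdio.h>
#include <rte_ethdev.h>

/* Sketch: print link state using the renamed RTE_ETH_LINK_* names. */
static void
print_link(uint16_t port_id)
{
        struct rte_eth_link link;

        if (rte_eth_link_get_nowait(port_id, &link) != 0)
                return;
        printf("port %u: %s, %u Mbps, %s duplex\n", port_id,
               link.link_status == RTE_ETH_LINK_UP ? "up" : "down",
               link.link_speed,
               link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
               "full" : "half");
}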
@@ -2848,8 +2848,8 @@ hns3_dev_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete)
if (eth_dev->data->dev_started == 0) {
new_link.link_autoneg = mac->link_autoneg;
new_link.link_duplex = mac->link_duplex;
- new_link.link_speed = ETH_SPEED_NUM_NONE;
- new_link.link_status = ETH_LINK_DOWN;
+ new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+ new_link.link_status = RTE_ETH_LINK_DOWN;
goto out;
}
@@ -2861,7 +2861,7 @@ hns3_dev_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete)
break;
}
- if (!wait_to_complete || mac->link_status == ETH_LINK_UP)
+ if (!wait_to_complete || mac->link_status == RTE_ETH_LINK_UP)
break;
rte_delay_ms(HNS3_LINK_CHECK_INTERVAL);
@@ -3207,31 +3207,31 @@ hns3_parse_speed(int speed_cmd, uint32_t *speed)
{
switch (speed_cmd) {
case HNS3_CFG_SPEED_10M:
- *speed = ETH_SPEED_NUM_10M;
+ *speed = RTE_ETH_SPEED_NUM_10M;
break;
case HNS3_CFG_SPEED_100M:
- *speed = ETH_SPEED_NUM_100M;
+ *speed = RTE_ETH_SPEED_NUM_100M;
break;
case HNS3_CFG_SPEED_1G:
- *speed = ETH_SPEED_NUM_1G;
+ *speed = RTE_ETH_SPEED_NUM_1G;
break;
case HNS3_CFG_SPEED_10G:
- *speed = ETH_SPEED_NUM_10G;
+ *speed = RTE_ETH_SPEED_NUM_10G;
break;
case HNS3_CFG_SPEED_25G:
- *speed = ETH_SPEED_NUM_25G;
+ *speed = RTE_ETH_SPEED_NUM_25G;
break;
case HNS3_CFG_SPEED_40G:
- *speed = ETH_SPEED_NUM_40G;
+ *speed = RTE_ETH_SPEED_NUM_40G;
break;
case HNS3_CFG_SPEED_50G:
- *speed = ETH_SPEED_NUM_50G;
+ *speed = RTE_ETH_SPEED_NUM_50G;
break;
case HNS3_CFG_SPEED_100G:
- *speed = ETH_SPEED_NUM_100G;
+ *speed = RTE_ETH_SPEED_NUM_100G;
break;
case HNS3_CFG_SPEED_200G:
- *speed = ETH_SPEED_NUM_200G;
+ *speed = RTE_ETH_SPEED_NUM_200G;
break;
default:
return -EINVAL;
@@ -3559,39 +3559,39 @@ hns3_cfg_mac_speed_dup_hw(struct hns3_hw *hw, uint32_t speed, uint8_t duplex)
hns3_set_bit(req->speed_dup, HNS3_CFG_DUPLEX_B, !!duplex ? 1 : 0);
switch (speed) {
- case ETH_SPEED_NUM_10M:
+ case RTE_ETH_SPEED_NUM_10M:
hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_10M);
break;
- case ETH_SPEED_NUM_100M:
+ case RTE_ETH_SPEED_NUM_100M:
hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_100M);
break;
- case ETH_SPEED_NUM_1G:
+ case RTE_ETH_SPEED_NUM_1G:
hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_1G);
break;
- case ETH_SPEED_NUM_10G:
+ case RTE_ETH_SPEED_NUM_10G:
hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_10G);
break;
- case ETH_SPEED_NUM_25G:
+ case RTE_ETH_SPEED_NUM_25G:
hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_25G);
break;
- case ETH_SPEED_NUM_40G:
+ case RTE_ETH_SPEED_NUM_40G:
hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_40G);
break;
- case ETH_SPEED_NUM_50G:
+ case RTE_ETH_SPEED_NUM_50G:
hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_50G);
break;
- case ETH_SPEED_NUM_100G:
+ case RTE_ETH_SPEED_NUM_100G:
hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_100G);
break;
- case ETH_SPEED_NUM_200G:
+ case RTE_ETH_SPEED_NUM_200G:
hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_200G);
break;
@@ -4254,14 +4254,14 @@ hns3_mac_init(struct hns3_hw *hw)
int ret;
pf->support_sfp_query = true;
- mac->link_duplex = ETH_LINK_FULL_DUPLEX;
+ mac->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
ret = hns3_cfg_mac_speed_dup_hw(hw, mac->link_speed, mac->link_duplex);
if (ret) {
PMD_INIT_LOG(ERR, "Config mac speed dup fail ret = %d", ret);
return ret;
}
- mac->link_status = ETH_LINK_DOWN;
+ mac->link_status = RTE_ETH_LINK_DOWN;
return hns3_config_mtu(hw, pf->mps);
}
@@ -4511,7 +4511,7 @@ hns3_dev_promiscuous_enable(struct rte_eth_dev *dev)
* all packets coming in the receiving direction.
*/
offloads = dev->data->dev_conf.rxmode.offloads;
- if (offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
+ if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
ret = hns3_enable_vlan_filter(hns, false);
if (ret) {
hns3_err(hw, "failed to enable promiscuous mode due to "
@@ -4552,7 +4552,7 @@ hns3_dev_promiscuous_disable(struct rte_eth_dev *dev)
}
/* when promiscuous mode was disabled, restore the vlan filter status */
offloads = dev->data->dev_conf.rxmode.offloads;
- if (offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
+ if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
ret = hns3_enable_vlan_filter(hns, true);
if (ret) {
hns3_err(hw, "failed to disable promiscuous mode due to"
@@ -4672,8 +4672,8 @@ hns3_get_sfp_info(struct hns3_hw *hw, struct hns3_mac *mac_info)
mac_info->supported_speed =
rte_le_to_cpu_32(resp->supported_speed);
mac_info->support_autoneg = resp->autoneg_ability;
- mac_info->link_autoneg = (resp->autoneg == 0) ? ETH_LINK_FIXED
- : ETH_LINK_AUTONEG;
+ mac_info->link_autoneg = (resp->autoneg == 0) ? RTE_ETH_LINK_FIXED
+ : RTE_ETH_LINK_AUTONEG;
} else {
mac_info->query_type = HNS3_DEFAULT_QUERY;
}
@@ -4684,8 +4684,8 @@ hns3_get_sfp_info(struct hns3_hw *hw, struct hns3_mac *mac_info)
static uint8_t
hns3_check_speed_dup(uint8_t duplex, uint32_t speed)
{
- if (!(speed == ETH_SPEED_NUM_10M || speed == ETH_SPEED_NUM_100M))
- duplex = ETH_LINK_FULL_DUPLEX;
+ if (!(speed == RTE_ETH_SPEED_NUM_10M || speed == RTE_ETH_SPEED_NUM_100M))
+ duplex = RTE_ETH_LINK_FULL_DUPLEX;
return duplex;
}
@@ -4735,7 +4735,7 @@ hns3_update_fiber_link_info(struct hns3_hw *hw)
return ret;
/* Do nothing if no SFP */
- if (mac_info.link_speed == ETH_SPEED_NUM_NONE)
+ if (mac_info.link_speed == RTE_ETH_SPEED_NUM_NONE)
return 0;
/*
@@ -4762,7 +4762,7 @@ hns3_update_fiber_link_info(struct hns3_hw *hw)
/* Config full duplex for SFP */
return hns3_cfg_mac_speed_dup(hw, mac_info.link_speed,
- ETH_LINK_FULL_DUPLEX);
+ RTE_ETH_LINK_FULL_DUPLEX);
}
static void
@@ -4881,10 +4881,10 @@ hns3_cfg_mac_mode(struct hns3_hw *hw, bool enable)
hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_B, val);
/*
- * If DEV_RX_OFFLOAD_KEEP_CRC offload is set, MAC will not strip CRC
+ * If RTE_ETH_RX_OFFLOAD_KEEP_CRC offload is set, MAC will not strip CRC
* when receiving frames. Otherwise, CRC will be stripped.
*/
- if (hw->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+ if (hw->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_STRIP_B, 0);
else
hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_STRIP_B, val);
@@ -4912,7 +4912,7 @@ hns3_get_mac_link_status(struct hns3_hw *hw)
ret = hns3_cmd_send(hw, &desc, 1);
if (ret) {
hns3_err(hw, "get link status cmd failed %d", ret);
- return ETH_LINK_DOWN;
+ return RTE_ETH_LINK_DOWN;
}
req = (struct hns3_link_status_cmd *)desc.data;
@@ -5094,19 +5094,19 @@ hns3_set_firber_default_support_speed(struct hns3_hw *hw)
struct hns3_mac *mac = &hw->mac;
switch (mac->link_speed) {
- case ETH_SPEED_NUM_1G:
+ case RTE_ETH_SPEED_NUM_1G:
return HNS3_FIBER_LINK_SPEED_1G_BIT;
- case ETH_SPEED_NUM_10G:
+ case RTE_ETH_SPEED_NUM_10G:
return HNS3_FIBER_LINK_SPEED_10G_BIT;
- case ETH_SPEED_NUM_25G:
+ case RTE_ETH_SPEED_NUM_25G:
return HNS3_FIBER_LINK_SPEED_25G_BIT;
- case ETH_SPEED_NUM_40G:
+ case RTE_ETH_SPEED_NUM_40G:
return HNS3_FIBER_LINK_SPEED_40G_BIT;
- case ETH_SPEED_NUM_50G:
+ case RTE_ETH_SPEED_NUM_50G:
return HNS3_FIBER_LINK_SPEED_50G_BIT;
- case ETH_SPEED_NUM_100G:
+ case RTE_ETH_SPEED_NUM_100G:
return HNS3_FIBER_LINK_SPEED_100G_BIT;
- case ETH_SPEED_NUM_200G:
+ case RTE_ETH_SPEED_NUM_200G:
return HNS3_FIBER_LINK_SPEED_200G_BIT;
default:
hns3_warn(hw, "invalid speed %u Mbps.", mac->link_speed);
@@ -5344,20 +5344,20 @@ hns3_convert_link_speeds2bitmap_copper(uint32_t link_speeds)
{
uint32_t speed_bit;
- switch (link_speeds & ~ETH_LINK_SPEED_FIXED) {
- case ETH_LINK_SPEED_10M:
+ switch (link_speeds & ~RTE_ETH_LINK_SPEED_FIXED) {
+ case RTE_ETH_LINK_SPEED_10M:
speed_bit = HNS3_PHY_LINK_SPEED_10M_BIT;
break;
- case ETH_LINK_SPEED_10M_HD:
+ case RTE_ETH_LINK_SPEED_10M_HD:
speed_bit = HNS3_PHY_LINK_SPEED_10M_HD_BIT;
break;
- case ETH_LINK_SPEED_100M:
+ case RTE_ETH_LINK_SPEED_100M:
speed_bit = HNS3_PHY_LINK_SPEED_100M_BIT;
break;
- case ETH_LINK_SPEED_100M_HD:
+ case RTE_ETH_LINK_SPEED_100M_HD:
speed_bit = HNS3_PHY_LINK_SPEED_100M_HD_BIT;
break;
- case ETH_LINK_SPEED_1G:
+ case RTE_ETH_LINK_SPEED_1G:
speed_bit = HNS3_PHY_LINK_SPEED_1000M_BIT;
break;
default:
@@ -5373,26 +5373,26 @@ hns3_convert_link_speeds2bitmap_fiber(uint32_t link_speeds)
{
uint32_t speed_bit;
- switch (link_speeds & ~ETH_LINK_SPEED_FIXED) {
- case ETH_LINK_SPEED_1G:
+ switch (link_speeds & ~RTE_ETH_LINK_SPEED_FIXED) {
+ case RTE_ETH_LINK_SPEED_1G:
speed_bit = HNS3_FIBER_LINK_SPEED_1G_BIT;
break;
- case ETH_LINK_SPEED_10G:
+ case RTE_ETH_LINK_SPEED_10G:
speed_bit = HNS3_FIBER_LINK_SPEED_10G_BIT;
break;
- case ETH_LINK_SPEED_25G:
+ case RTE_ETH_LINK_SPEED_25G:
speed_bit = HNS3_FIBER_LINK_SPEED_25G_BIT;
break;
- case ETH_LINK_SPEED_40G:
+ case RTE_ETH_LINK_SPEED_40G:
speed_bit = HNS3_FIBER_LINK_SPEED_40G_BIT;
break;
- case ETH_LINK_SPEED_50G:
+ case RTE_ETH_LINK_SPEED_50G:
speed_bit = HNS3_FIBER_LINK_SPEED_50G_BIT;
break;
- case ETH_LINK_SPEED_100G:
+ case RTE_ETH_LINK_SPEED_100G:
speed_bit = HNS3_FIBER_LINK_SPEED_100G_BIT;
break;
- case ETH_LINK_SPEED_200G:
+ case RTE_ETH_LINK_SPEED_200G:
speed_bit = HNS3_FIBER_LINK_SPEED_200G_BIT;
break;
default:
@@ -5427,28 +5427,28 @@ hns3_check_port_speed(struct hns3_hw *hw, uint32_t link_speeds)
static inline uint32_t
hns3_get_link_speed(uint32_t link_speeds)
{
- uint32_t speed = ETH_SPEED_NUM_NONE;
-
- if (link_speeds & ETH_LINK_SPEED_10M ||
- link_speeds & ETH_LINK_SPEED_10M_HD)
- speed = ETH_SPEED_NUM_10M;
- if (link_speeds & ETH_LINK_SPEED_100M ||
- link_speeds & ETH_LINK_SPEED_100M_HD)
- speed = ETH_SPEED_NUM_100M;
- if (link_speeds & ETH_LINK_SPEED_1G)
- speed = ETH_SPEED_NUM_1G;
- if (link_speeds & ETH_LINK_SPEED_10G)
- speed = ETH_SPEED_NUM_10G;
- if (link_speeds & ETH_LINK_SPEED_25G)
- speed = ETH_SPEED_NUM_25G;
- if (link_speeds & ETH_LINK_SPEED_40G)
- speed = ETH_SPEED_NUM_40G;
- if (link_speeds & ETH_LINK_SPEED_50G)
- speed = ETH_SPEED_NUM_50G;
- if (link_speeds & ETH_LINK_SPEED_100G)
- speed = ETH_SPEED_NUM_100G;
- if (link_speeds & ETH_LINK_SPEED_200G)
- speed = ETH_SPEED_NUM_200G;
+ uint32_t speed = RTE_ETH_SPEED_NUM_NONE;
+
+ if (link_speeds & RTE_ETH_LINK_SPEED_10M ||
+ link_speeds & RTE_ETH_LINK_SPEED_10M_HD)
+ speed = RTE_ETH_SPEED_NUM_10M;
+ if (link_speeds & RTE_ETH_LINK_SPEED_100M ||
+ link_speeds & RTE_ETH_LINK_SPEED_100M_HD)
+ speed = RTE_ETH_SPEED_NUM_100M;
+ if (link_speeds & RTE_ETH_LINK_SPEED_1G)
+ speed = RTE_ETH_SPEED_NUM_1G;
+ if (link_speeds & RTE_ETH_LINK_SPEED_10G)
+ speed = RTE_ETH_SPEED_NUM_10G;
+ if (link_speeds & RTE_ETH_LINK_SPEED_25G)
+ speed = RTE_ETH_SPEED_NUM_25G;
+ if (link_speeds & RTE_ETH_LINK_SPEED_40G)
+ speed = RTE_ETH_SPEED_NUM_40G;
+ if (link_speeds & RTE_ETH_LINK_SPEED_50G)
+ speed = RTE_ETH_SPEED_NUM_50G;
+ if (link_speeds & RTE_ETH_LINK_SPEED_100G)
+ speed = RTE_ETH_SPEED_NUM_100G;
+ if (link_speeds & RTE_ETH_LINK_SPEED_200G)
+ speed = RTE_ETH_SPEED_NUM_200G;
return speed;
}
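
ethdev already provides the inverse of this mapping: given a numeric speed and a duplex flag, rte_eth_speed_bitflag() returns the matching RTE_ETH_LINK_SPEED_* bit (0 if unknown). A minimal sketch:

#include <rte_ethdev.h>

/* Sketch: map RTE_ETH_SPEED_NUM_10G back to its link-speed bit. */
static uint32_t
speed_10g_bit(void)
{
        return rte_eth_speed_bitflag(RTE_ETH_SPEED_NUM_10G,
                                     RTE_ETH_LINK_FULL_DUPLEX);
}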
@@ -5456,11 +5456,11 @@ hns3_get_link_speed(uint32_t link_speeds)
static uint8_t
hns3_get_link_duplex(uint32_t link_speeds)
{
- if ((link_speeds & ETH_LINK_SPEED_10M_HD) ||
- (link_speeds & ETH_LINK_SPEED_100M_HD))
- return ETH_LINK_HALF_DUPLEX;
+ if ((link_speeds & RTE_ETH_LINK_SPEED_10M_HD) ||
+ (link_speeds & RTE_ETH_LINK_SPEED_100M_HD))
+ return RTE_ETH_LINK_HALF_DUPLEX;
else
- return ETH_LINK_FULL_DUPLEX;
+ return RTE_ETH_LINK_FULL_DUPLEX;
}
static int
@@ -5594,9 +5594,9 @@ hns3_apply_link_speed(struct hns3_hw *hw)
struct hns3_set_link_speed_cfg cfg;
memset(&cfg, 0, sizeof(struct hns3_set_link_speed_cfg));
- cfg.autoneg = (conf->link_speeds == ETH_LINK_SPEED_AUTONEG) ?
- ETH_LINK_AUTONEG : ETH_LINK_FIXED;
- if (cfg.autoneg != ETH_LINK_AUTONEG) {
+ cfg.autoneg = (conf->link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) ?
+ RTE_ETH_LINK_AUTONEG : RTE_ETH_LINK_FIXED;
+ if (cfg.autoneg != RTE_ETH_LINK_AUTONEG) {
cfg.speed = hns3_get_link_speed(conf->link_speeds);
cfg.duplex = hns3_get_link_duplex(conf->link_speeds);
}
@@ -5869,7 +5869,7 @@ hns3_do_stop(struct hns3_adapter *hns)
ret = hns3_cfg_mac_mode(hw, false);
if (ret)
return ret;
- hw->mac.link_status = ETH_LINK_DOWN;
+ hw->mac.link_status = RTE_ETH_LINK_DOWN;
if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) {
hns3_configure_all_mac_addr(hns, true);
@@ -6080,17 +6080,17 @@ hns3_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
current_mode = hns3_get_current_fc_mode(dev);
switch (current_mode) {
case HNS3_FC_FULL:
- fc_conf->mode = RTE_FC_FULL;
+ fc_conf->mode = RTE_ETH_FC_FULL;
break;
case HNS3_FC_TX_PAUSE:
- fc_conf->mode = RTE_FC_TX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
break;
case HNS3_FC_RX_PAUSE:
- fc_conf->mode = RTE_FC_RX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
break;
case HNS3_FC_NONE:
default:
- fc_conf->mode = RTE_FC_NONE;
+ fc_conf->mode = RTE_ETH_FC_NONE;
break;
}
@@ -6236,7 +6236,7 @@ hns3_get_dcb_info(struct rte_eth_dev *dev, struct rte_eth_dcb_info *dcb_info)
int i;
rte_spinlock_lock(&hw->lock);
- if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG)
+ if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_DCB_FLAG)
dcb_info->nb_tcs = pf->local_max_tc;
else
dcb_info->nb_tcs = 1;
@@ -6536,7 +6536,7 @@ hns3_stop_service(struct hns3_adapter *hns)
struct rte_eth_dev *eth_dev;
eth_dev = &rte_eth_devices[hw->data->port_id];
- hw->mac.link_status = ETH_LINK_DOWN;
+ hw->mac.link_status = RTE_ETH_LINK_DOWN;
if (hw->adapter_state == HNS3_NIC_STARTED) {
rte_eal_alarm_cancel(hns3_service_handler, eth_dev);
hns3_update_linkstatus_and_event(hw, false);
@@ -6826,7 +6826,7 @@ get_current_fec_auto_state(struct hns3_hw *hw, uint8_t *state)
* in device of link speed
* below 10 Gbps.
*/
- if (hw->mac.link_speed < ETH_SPEED_NUM_10G) {
+ if (hw->mac.link_speed < RTE_ETH_SPEED_NUM_10G) {
*state = 0;
return 0;
}
@@ -6858,7 +6858,7 @@ hns3_fec_get_internal(struct hns3_hw *hw, uint32_t *fec_capa)
* configured FEC mode is returned.
* If link is up, current FEC mode is returned.
*/
- if (hw->mac.link_status == ETH_LINK_DOWN) {
+ if (hw->mac.link_status == RTE_ETH_LINK_DOWN) {
ret = get_current_fec_auto_state(hw, &auto_state);
if (ret)
return ret;
@@ -6957,12 +6957,12 @@ get_current_speed_fec_cap(struct hns3_hw *hw, struct rte_eth_fec_capa *fec_capa)
uint32_t cur_capa;
switch (mac->link_speed) {
- case ETH_SPEED_NUM_10G:
+ case RTE_ETH_SPEED_NUM_10G:
cur_capa = fec_capa[1].capa;
break;
- case ETH_SPEED_NUM_25G:
- case ETH_SPEED_NUM_100G:
- case ETH_SPEED_NUM_200G:
+ case RTE_ETH_SPEED_NUM_25G:
+ case RTE_ETH_SPEED_NUM_100G:
+ case RTE_ETH_SPEED_NUM_200G:
cur_capa = fec_capa[0].capa;
break;
default:
@@ -190,10 +190,10 @@ struct hns3_mac {
uint8_t mac_addr[RTE_ETHER_ADDR_LEN];
uint8_t media_type;
uint8_t phy_addr;
- uint8_t link_duplex : 1; /* ETH_LINK_[HALF/FULL]_DUPLEX */
- uint8_t link_autoneg : 1; /* ETH_LINK_[AUTONEG/FIXED] */
- uint8_t link_status : 1; /* ETH_LINK_[DOWN/UP] */
- uint32_t link_speed; /* ETH_SPEED_NUM_ */
+ uint8_t link_duplex : 1; /* RTE_ETH_LINK_[HALF/FULL]_DUPLEX */
+ uint8_t link_autoneg : 1; /* RTE_ETH_LINK_[AUTONEG/FIXED] */
+ uint8_t link_status : 1; /* RTE_ETH_LINK_[DOWN/UP] */
+ uint32_t link_speed; /* RTE_ETH_SPEED_NUM_ */
/*
* Some firmware versions support only the SFP speed query. In addition
* to the SFP speed query, some firmware supports the query of the speed
@@ -1079,9 +1079,9 @@ static inline uint64_t
hns3_txvlan_cap_get(struct hns3_hw *hw)
{
if (hw->port_base_vlan_cfg.state)
- return DEV_TX_OFFLOAD_VLAN_INSERT;
+ return RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
else
- return DEV_TX_OFFLOAD_VLAN_INSERT | DEV_TX_OFFLOAD_QINQ_INSERT;
+ return RTE_ETH_TX_OFFLOAD_VLAN_INSERT | RTE_ETH_TX_OFFLOAD_QINQ_INSERT;
}
#endif /* _HNS3_ETHDEV_H_ */
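
The RTE_ETH_TX_OFFLOAD_VLAN_INSERT capability advertised here is driven per packet through mbuf Tx offload fields; a minimal sketch using the mbuf flag names from the same 21.11 rename (RTE_MBUF_F_TX_VLAN, formerly PKT_TX_VLAN):

#include <rte_mbuf.h>

/* Sketch: ask the PMD to insert a VLAN tag on transmit. */
static void
request_vlan_insert(struct rte_mbuf *m, uint16_t vlan_tci)
{
        m->ol_flags |= RTE_MBUF_F_TX_VLAN;
        m->vlan_tci = vlan_tci;
}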
@@ -807,15 +807,15 @@ hns3vf_dev_configure(struct rte_eth_dev *dev)
}
hw->adapter_state = HNS3_NIC_CONFIGURING;
- if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
+ if (conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
hns3_err(hw, "setting link speed/duplex not supported");
ret = -EINVAL;
goto cfg_err;
}
/* When RSS is not configured, direct all packets to queue 0 */
- if ((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG) {
- conf->rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+ if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
+ conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
hw->rss_dis_flag = false;
rss_conf = conf->rx_adv_conf.rss_conf;
ret = hns3_dev_rss_hash_update(dev, &rss_conf);
@@ -832,7 +832,7 @@ hns3vf_dev_configure(struct rte_eth_dev *dev)
goto cfg_err;
/* config hardware GRO */
- gro_en = conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO ? true : false;
+ gro_en = conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO ? true : false;
ret = hns3_config_gro(hw, gro_en);
if (ret)
goto cfg_err;
@@ -935,32 +935,32 @@ hns3vf_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
info->max_mtu = info->max_rx_pktlen - HNS3_ETH_OVERHEAD;
info->max_lro_pkt_size = HNS3_MAX_LRO_SIZE;
- info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_SCTP_CKSUM |
- DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
- DEV_RX_OFFLOAD_SCATTER |
- DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_VLAN_FILTER |
- DEV_RX_OFFLOAD_RSS_HASH |
- DEV_RX_OFFLOAD_TCP_LRO);
- info->tx_offload_capa = (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_SCTP_CKSUM |
- DEV_TX_OFFLOAD_MULTI_SEGS |
- DEV_TX_OFFLOAD_TCP_TSO |
- DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
- DEV_TX_OFFLOAD_GRE_TNL_TSO |
- DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
- DEV_TX_OFFLOAD_MBUF_FAST_FREE |
+ info->rx_offload_capa = (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_SCATTER |
+ RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+ RTE_ETH_RX_OFFLOAD_RSS_HASH |
+ RTE_ETH_RX_OFFLOAD_TCP_LRO);
+ info->tx_offload_capa = (RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+ RTE_ETH_TX_OFFLOAD_TCP_TSO |
+ RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE |
hns3_txvlan_cap_get(hw));
if (hns3_dev_get_support(hw, OUTER_UDP_CKSUM))
- info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
+ info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM;
if (hns3_dev_get_support(hw, INDEP_TXRX))
info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
@@ -1640,10 +1640,10 @@ hns3vf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
tmp_mask = (unsigned int)mask;
- if (tmp_mask & ETH_VLAN_FILTER_MASK) {
+ if (tmp_mask & RTE_ETH_VLAN_FILTER_MASK) {
rte_spinlock_lock(&hw->lock);
/* Enable or disable VLAN filter */
- if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+ if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
ret = hns3vf_en_vlan_filter(hw, true);
else
ret = hns3vf_en_vlan_filter(hw, false);
@@ -1653,10 +1653,10 @@ hns3vf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
}
/* Vlan stripping setting */
- if (tmp_mask & ETH_VLAN_STRIP_MASK) {
+ if (tmp_mask & RTE_ETH_VLAN_STRIP_MASK) {
rte_spinlock_lock(&hw->lock);
/* Enable or disable VLAN stripping */
- if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
ret = hns3vf_en_hw_strip_rxvtag(hw, true);
else
ret = hns3vf_en_hw_strip_rxvtag(hw, false);
@@ -1724,7 +1724,7 @@ hns3vf_restore_vlan_conf(struct hns3_adapter *hns)
int ret;
dev_conf = &hw->data->dev_conf;
- en = dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP ? true
+ en = dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP ? true
: false;
ret = hns3vf_en_hw_strip_rxvtag(hw, en);
if (ret)
@@ -1749,8 +1749,8 @@ hns3vf_dev_configure_vlan(struct rte_eth_dev *dev)
}
/* Apply vlan offload setting */
- ret = hns3vf_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK |
- ETH_VLAN_FILTER_MASK);
+ ret = hns3vf_vlan_offload_set(dev, RTE_ETH_VLAN_STRIP_MASK |
+ RTE_ETH_VLAN_FILTER_MASK);
if (ret)
hns3_err(hw, "dev config vlan offload failed, ret = %d.", ret);
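
The application-facing counterpart of this driver path is rte_eth_dev_set_vlan_offload(); as far as the ethdev API is concerned, bits set in the mask enable the corresponding offload and clear bits disable it. A minimal sketch:

#include <rte_ethdev.h>

/* Sketch: enable VLAN stripping and filtering on a configured port,
 * leaving the extend/QinQ offloads disabled. */
static int
enable_vlan_strip_and_filter(uint16_t port_id)
{
        return rte_eth_dev_set_vlan_offload(port_id,
                        RTE_ETH_VLAN_STRIP_OFFLOAD |
                        RTE_ETH_VLAN_FILTER_OFFLOAD);
}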
@@ -2059,7 +2059,7 @@ hns3vf_do_stop(struct hns3_adapter *hns)
struct hns3_hw *hw = &hns->hw;
int ret;
- hw->mac.link_status = ETH_LINK_DOWN;
+ hw->mac.link_status = RTE_ETH_LINK_DOWN;
/*
* The "hns3vf_do_stop" function will also be called by .stop_service to
@@ -2218,31 +2218,31 @@ hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
memset(&new_link, 0, sizeof(new_link));
switch (mac->link_speed) {
- case ETH_SPEED_NUM_10M:
- case ETH_SPEED_NUM_100M:
- case ETH_SPEED_NUM_1G:
- case ETH_SPEED_NUM_10G:
- case ETH_SPEED_NUM_25G:
- case ETH_SPEED_NUM_40G:
- case ETH_SPEED_NUM_50G:
- case ETH_SPEED_NUM_100G:
- case ETH_SPEED_NUM_200G:
+ case RTE_ETH_SPEED_NUM_10M:
+ case RTE_ETH_SPEED_NUM_100M:
+ case RTE_ETH_SPEED_NUM_1G:
+ case RTE_ETH_SPEED_NUM_10G:
+ case RTE_ETH_SPEED_NUM_25G:
+ case RTE_ETH_SPEED_NUM_40G:
+ case RTE_ETH_SPEED_NUM_50G:
+ case RTE_ETH_SPEED_NUM_100G:
+ case RTE_ETH_SPEED_NUM_200G:
if (mac->link_status)
new_link.link_speed = mac->link_speed;
break;
default:
if (mac->link_status)
- new_link.link_speed = ETH_SPEED_NUM_UNKNOWN;
+ new_link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
break;
}
if (!mac->link_status)
- new_link.link_speed = ETH_SPEED_NUM_NONE;
+ new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
new_link.link_duplex = mac->link_duplex;
- new_link.link_status = mac->link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
+ new_link.link_status = mac->link_status ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
new_link.link_autoneg =
- !(eth_dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED);
+ !(eth_dev->data->dev_conf.link_speeds & RTE_ETH_LINK_SPEED_FIXED);
return rte_eth_linkstatus_set(eth_dev, &new_link);
}
@@ -2570,11 +2570,11 @@ hns3vf_stop_service(struct hns3_adapter *hns)
* Make sure to update the link status before hns3vf_stop_poll_job,
* because updating the link status depends on the polling job existing.
*/
- hns3vf_update_link_status(hw, ETH_LINK_DOWN, hw->mac.link_speed,
+ hns3vf_update_link_status(hw, RTE_ETH_LINK_DOWN, hw->mac.link_speed,
hw->mac.link_duplex);
hns3vf_stop_poll_job(eth_dev);
}
- hw->mac.link_status = ETH_LINK_DOWN;
+ hw->mac.link_status = RTE_ETH_LINK_DOWN;
hns3_set_rxtx_function(eth_dev);
rte_wmb();
@@ -1298,10 +1298,10 @@ hns3_rss_input_tuple_supported(struct hns3_hw *hw,
* Kunpeng930 and future Kunpeng series support using src/dst port
* fields in the RSS hash for the IPv6 SCTP packet type.
*/
- if (rss->types & (ETH_RSS_L4_DST_ONLY | ETH_RSS_L4_SRC_ONLY) &&
- (rss->types & ETH_RSS_IP ||
+ if (rss->types & (RTE_ETH_RSS_L4_DST_ONLY | RTE_ETH_RSS_L4_SRC_ONLY) &&
+ (rss->types & RTE_ETH_RSS_IP ||
(!hw->rss_info.ipv6_sctp_offload_supported &&
- rss->types & ETH_RSS_NONFRAG_IPV6_SCTP)))
+ rss->types & RTE_ETH_RSS_NONFRAG_IPV6_SCTP)))
return false;
return true;
@@ -21,7 +21,7 @@ hns3_mbuf_dyn_rx_timestamp_register(struct rte_eth_dev *dev,
struct hns3_hw *hw = &hns->hw;
int ret;
- if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+ if (!(conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
return 0;
ret = rte_mbuf_dyn_rx_timestamp_register
@@ -76,69 +76,69 @@ static const struct {
uint64_t rss_types;
uint64_t rss_field;
} hns3_set_tuple_table[] = {
- { ETH_RSS_FRAG_IPV4 | ETH_RSS_L3_SRC_ONLY,
+ { RTE_ETH_RSS_FRAG_IPV4 | RTE_ETH_RSS_L3_SRC_ONLY,
BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_S) },
- { ETH_RSS_FRAG_IPV4 | ETH_RSS_L3_DST_ONLY,
+ { RTE_ETH_RSS_FRAG_IPV4 | RTE_ETH_RSS_L3_DST_ONLY,
BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_D) },
- { ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_SRC_ONLY,
+ { RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_L3_SRC_ONLY,
BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_S) },
- { ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_DST_ONLY,
+ { RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_L3_DST_ONLY,
BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_D) },
- { ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L4_SRC_ONLY,
+ { RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_L4_SRC_ONLY,
BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_S) },
- { ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L4_DST_ONLY,
+ { RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_L4_DST_ONLY,
BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_D) },
- { ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_SRC_ONLY,
+ { RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_L3_SRC_ONLY,
BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_S) },
- { ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_DST_ONLY,
+ { RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_L3_DST_ONLY,
BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_D) },
- { ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L4_SRC_ONLY,
+ { RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_L4_SRC_ONLY,
BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_S) },
- { ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L4_DST_ONLY,
+ { RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_L4_DST_ONLY,
BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_D) },
- { ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_SRC_ONLY,
+ { RTE_ETH_RSS_NONFRAG_IPV4_SCTP | RTE_ETH_RSS_L3_SRC_ONLY,
BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_S) },
- { ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_DST_ONLY,
+ { RTE_ETH_RSS_NONFRAG_IPV4_SCTP | RTE_ETH_RSS_L3_DST_ONLY,
BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_D) },
- { ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L4_SRC_ONLY,
+ { RTE_ETH_RSS_NONFRAG_IPV4_SCTP | RTE_ETH_RSS_L4_SRC_ONLY,
BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_S) },
- { ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L4_DST_ONLY,
+ { RTE_ETH_RSS_NONFRAG_IPV4_SCTP | RTE_ETH_RSS_L4_DST_ONLY,
BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_D) },
- { ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L3_SRC_ONLY,
+ { RTE_ETH_RSS_NONFRAG_IPV4_OTHER | RTE_ETH_RSS_L3_SRC_ONLY,
BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_S) },
- { ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L3_DST_ONLY,
+ { RTE_ETH_RSS_NONFRAG_IPV4_OTHER | RTE_ETH_RSS_L3_DST_ONLY,
BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_D) },
- { ETH_RSS_FRAG_IPV6 | ETH_RSS_L3_SRC_ONLY,
+ { RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_L3_SRC_ONLY,
BIT_ULL(HNS3_RSS_FIELD_IPV6_FRAG_IP_S) },
- { ETH_RSS_FRAG_IPV6 | ETH_RSS_L3_DST_ONLY,
+ { RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_L3_DST_ONLY,
BIT_ULL(HNS3_RSS_FIELD_IPV6_FRAG_IP_D) },
- { ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L3_SRC_ONLY,
+ { RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_L3_SRC_ONLY,
BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_IP_S) },
- { ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L3_DST_ONLY,
+ { RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_L3_DST_ONLY,
BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_IP_D) },
- { ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L4_SRC_ONLY,
+ { RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_L4_SRC_ONLY,
BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_S) },
- { ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L4_DST_ONLY,
+ { RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_L4_DST_ONLY,
BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_D) },
- { ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L3_SRC_ONLY,
+ { RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_L3_SRC_ONLY,
BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_IP_S) },
- { ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L3_DST_ONLY,
+ { RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_L3_DST_ONLY,
BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_IP_D) },
- { ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L4_SRC_ONLY,
+ { RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_L4_SRC_ONLY,
BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_S) },
- { ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L4_DST_ONLY,
+ { RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_L4_DST_ONLY,
BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_D) },
- { ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L3_SRC_ONLY,
+ { RTE_ETH_RSS_NONFRAG_IPV6_SCTP | RTE_ETH_RSS_L3_SRC_ONLY,
BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_S) },
- { ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L3_DST_ONLY,
+ { RTE_ETH_RSS_NONFRAG_IPV6_SCTP | RTE_ETH_RSS_L3_DST_ONLY,
BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_D) },
- { ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L4_SRC_ONLY,
+ { RTE_ETH_RSS_NONFRAG_IPV6_SCTP | RTE_ETH_RSS_L4_SRC_ONLY,
BIT_ULL(HNS3_RSS_FILED_IPV6_SCTP_EN_SCTP_S) },
- { ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L4_DST_ONLY,
+ { RTE_ETH_RSS_NONFRAG_IPV6_SCTP | RTE_ETH_RSS_L4_DST_ONLY,
BIT_ULL(HNS3_RSS_FILED_IPV6_SCTP_EN_SCTP_D) },
- { ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L3_SRC_ONLY,
+ { RTE_ETH_RSS_NONFRAG_IPV6_OTHER | RTE_ETH_RSS_L3_SRC_ONLY,
BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_S) },
- { ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L3_DST_ONLY,
+ { RTE_ETH_RSS_NONFRAG_IPV6_OTHER | RTE_ETH_RSS_L3_DST_ONLY,
BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_D) },
};
@@ -146,44 +146,44 @@ static const struct {
uint64_t rss_types;
uint64_t rss_field;
} hns3_set_rss_types[] = {
- { ETH_RSS_FRAG_IPV4, BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_D) |
+ { RTE_ETH_RSS_FRAG_IPV4, BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_D) |
BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_S) },
- { ETH_RSS_NONFRAG_IPV4_TCP, BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_S) |
+ { RTE_ETH_RSS_NONFRAG_IPV4_TCP, BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_S) |
BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_D) |
BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_S) |
BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_D) },
- { ETH_RSS_NONFRAG_IPV4_TCP, BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_S) |
+ { RTE_ETH_RSS_NONFRAG_IPV4_TCP, BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_S) |
BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_D) |
BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_S) |
BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_D) },
- { ETH_RSS_NONFRAG_IPV4_UDP, BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_S) |
+ { RTE_ETH_RSS_NONFRAG_IPV4_UDP, BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_S) |
BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_D) |
BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_S) |
BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_D) },
- { ETH_RSS_NONFRAG_IPV4_SCTP, BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_S) |
+ { RTE_ETH_RSS_NONFRAG_IPV4_SCTP, BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_S) |
BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_D) |
BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_S) |
BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_D) |
BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_VER) },
- { ETH_RSS_NONFRAG_IPV4_OTHER,
+ { RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_S) |
BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_D) },
- { ETH_RSS_FRAG_IPV6, BIT_ULL(HNS3_RSS_FIELD_IPV6_FRAG_IP_S) |
+ { RTE_ETH_RSS_FRAG_IPV6, BIT_ULL(HNS3_RSS_FIELD_IPV6_FRAG_IP_S) |
BIT_ULL(HNS3_RSS_FIELD_IPV6_FRAG_IP_D) },
- { ETH_RSS_NONFRAG_IPV6_TCP, BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_IP_S) |
+ { RTE_ETH_RSS_NONFRAG_IPV6_TCP, BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_IP_S) |
BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_IP_D) |
BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_S) |
BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_D) },
- { ETH_RSS_NONFRAG_IPV6_UDP, BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_IP_S) |
+ { RTE_ETH_RSS_NONFRAG_IPV6_UDP, BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_IP_S) |
BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_IP_D) |
BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_S) |
BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_D) },
- { ETH_RSS_NONFRAG_IPV6_SCTP, BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_S) |
+ { RTE_ETH_RSS_NONFRAG_IPV6_SCTP, BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_S) |
BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_D) |
BIT_ULL(HNS3_RSS_FILED_IPV6_SCTP_EN_SCTP_D) |
BIT_ULL(HNS3_RSS_FILED_IPV6_SCTP_EN_SCTP_S) |
BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_VER) },
- { ETH_RSS_NONFRAG_IPV6_OTHER,
+ { RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_S) |
BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_D) }
};
@@ -365,10 +365,10 @@ hns3_set_rss_tuple_by_rss_hf(struct hns3_hw *hw,
* When the user does not specify the following types, or a combination
* of them, all fields are enabled for the supported RSS
* types. The types in question are:
- * - ETH_RSS_L3_SRC_ONLY
- * - ETH_RSS_L3_DST_ONLY
- * - ETH_RSS_L4_SRC_ONLY
- * - ETH_RSS_L4_DST_ONLY
+ * - RTE_ETH_RSS_L3_SRC_ONLY
+ * - RTE_ETH_RSS_L3_DST_ONLY
+ * - RTE_ETH_RSS_L4_SRC_ONLY
+ * - RTE_ETH_RSS_L4_DST_ONLY
*/
if (fields_count == 0) {
for (i = 0; i < RTE_DIM(hns3_set_rss_types); i++) {
@@ -520,8 +520,8 @@ hns3_dev_rss_reta_update(struct rte_eth_dev *dev,
memcpy(indirection_tbl, rss_cfg->rss_indirection_tbl,
sizeof(rss_cfg->rss_indirection_tbl));
for (i = 0; i < reta_size; i++) {
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
if (reta_conf[idx].reta[shift] >= hw->alloc_rss_size) {
rte_spinlock_unlock(&hw->lock);
hns3_err(hw, "queue id(%u) set to redirection table "
@@ -572,8 +572,8 @@ hns3_dev_rss_reta_query(struct rte_eth_dev *dev,
}
rte_spinlock_lock(&hw->lock);
for (i = 0; i < reta_size; i++) {
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
if (reta_conf[idx].mask & (1ULL << shift))
reta_conf[idx].reta[shift] =
rss_cfg->rss_indirection_tbl[i];
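RTE_ETH_RETA_GROUP_SIZE keeps its value of 64 after the rename, so the idx/shift arithmetic is unchanged. A sketch of the matching application-side update, spreading entries round-robin (port_id, reta_size and nb_rx_queues are assumed known, e.g. from rte_eth_dev_info_get()):

	struct rte_eth_rss_reta_entry64 reta_conf[
			RTE_ETH_RSS_RETA_SIZE_512 / RTE_ETH_RETA_GROUP_SIZE];
	uint16_t i;

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < reta_size; i++) {
		uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
		uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE;

		reta_conf[idx].mask |= 1ULL << shift;	/* mark entry as valid */
		reta_conf[idx].reta[shift] = i % nb_rx_queues;
	}
	int ret = rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);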
@@ -692,7 +692,7 @@ hns3_config_rss(struct hns3_adapter *hns)
}
/* When RSS is off, redirect packets to queue 0 */
- if (((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG) == 0)
+ if (((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) == 0)
hns3_rss_uninit(hns);
/* Configure RSS hash algorithm and hash key offset */
@@ -709,7 +709,7 @@ hns3_config_rss(struct hns3_adapter *hns)
* When RSS is off, there is no need to write the RSS redirection
* table to hardware.
*/
- if (((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG)) {
+ if (((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) {
ret = hns3_set_rss_indir_table(hw, rss_cfg->rss_indirection_tbl,
hw->rss_ind_tbl_size);
if (ret)
@@ -723,7 +723,7 @@ hns3_config_rss(struct hns3_adapter *hns)
return ret;
rss_indir_table_uninit:
- if (((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG)) {
+ if (((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) {
ret1 = hns3_rss_reset_indir_table(hw);
if (ret1 != 0)
return ret;
@@ -8,20 +8,20 @@
#include <rte_flow.h>
#define HNS3_ETH_RSS_SUPPORT ( \
- ETH_RSS_FRAG_IPV4 | \
- ETH_RSS_NONFRAG_IPV4_TCP | \
- ETH_RSS_NONFRAG_IPV4_UDP | \
- ETH_RSS_NONFRAG_IPV4_SCTP | \
- ETH_RSS_NONFRAG_IPV4_OTHER | \
- ETH_RSS_FRAG_IPV6 | \
- ETH_RSS_NONFRAG_IPV6_TCP | \
- ETH_RSS_NONFRAG_IPV6_UDP | \
- ETH_RSS_NONFRAG_IPV6_SCTP | \
- ETH_RSS_NONFRAG_IPV6_OTHER | \
- ETH_RSS_L3_SRC_ONLY | \
- ETH_RSS_L3_DST_ONLY | \
- ETH_RSS_L4_SRC_ONLY | \
- ETH_RSS_L4_DST_ONLY)
+ RTE_ETH_RSS_FRAG_IPV4 | \
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
+ RTE_ETH_RSS_FRAG_IPV6 | \
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
+ RTE_ETH_RSS_L3_SRC_ONLY | \
+ RTE_ETH_RSS_L3_DST_ONLY | \
+ RTE_ETH_RSS_L4_SRC_ONLY | \
+ RTE_ETH_RSS_L4_DST_ONLY)
#define HNS3_RSS_IND_TBL_SIZE 512 /* The size of hash lookup table */
#define HNS3_RSS_IND_TBL_SIZE_MAX 2048
@@ -1924,7 +1924,7 @@ hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
memset(&rxq->dfx_stats, 0, sizeof(struct hns3_rx_dfx_stats));
/* CRC len set here is used for amending packet length */
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+ if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
rxq->crc_len = RTE_ETHER_CRC_LEN;
else
rxq->crc_len = 0;
@@ -1969,7 +1969,7 @@ hns3_rx_scattered_calc(struct rte_eth_dev *dev)
rxq->rx_buf_len);
}
- if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_SCATTER ||
+ if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER ||
dev->data->mtu + HNS3_ETH_OVERHEAD > hw->rx_buf_len)
dev->data->scattered_rx = true;
}
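The driver turns on scattered Rx automatically when the MTU outgrows a single buffer; an application can also request it explicitly. A one-line sketch (port_conf being the rte_eth_conf later passed to rte_eth_dev_configure()):

	/* Let the PMD chain mbufs for frames larger than one Rx buffer. */
	port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;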
@@ -2845,7 +2845,7 @@ hns3_get_rx_function(struct rte_eth_dev *dev)
vec_allowed = vec_support && hns3_get_default_vec_support();
sve_allowed = vec_support && hns3_get_sve_support();
simple_allowed = !dev->data->scattered_rx &&
- (offloads & DEV_RX_OFFLOAD_TCP_LRO) == 0;
+ (offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) == 0;
if (hns->rx_func_hint == HNS3_IO_FUNC_HINT_VEC && vec_allowed)
return hns3_recv_pkts_vec;
@@ -3139,7 +3139,7 @@ hns3_restore_gro_conf(struct hns3_hw *hw)
int ret;
offloads = hw->data->dev_conf.rxmode.offloads;
- gro_en = offloads & DEV_RX_OFFLOAD_TCP_LRO ? true : false;
+ gro_en = offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO ? true : false;
ret = hns3_config_gro(hw, gro_en);
if (ret)
hns3_err(hw, "restore hardware GRO to %s failed, ret = %d",
@@ -4291,7 +4291,7 @@ hns3_tx_check_simple_support(struct rte_eth_dev *dev)
if (hns3_dev_get_support(hw, PTP))
return false;
- return (offloads == (offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE));
+ return (offloads == (offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE));
}
static bool
@@ -4303,16 +4303,16 @@ hns3_get_tx_prep_needed(struct rte_eth_dev *dev)
return true;
#else
#define HNS3_DEV_TX_CSKUM_TSO_OFFLOAD_MASK (\
- DEV_TX_OFFLOAD_IPV4_CKSUM | \
- DEV_TX_OFFLOAD_TCP_CKSUM | \
- DEV_TX_OFFLOAD_UDP_CKSUM | \
- DEV_TX_OFFLOAD_SCTP_CKSUM | \
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
- DEV_TX_OFFLOAD_OUTER_UDP_CKSUM | \
- DEV_TX_OFFLOAD_TCP_TSO | \
- DEV_TX_OFFLOAD_VXLAN_TNL_TSO | \
- DEV_TX_OFFLOAD_GRE_TNL_TSO | \
- DEV_TX_OFFLOAD_GENEVE_TNL_TSO)
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_TCP_TSO | \
+ RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | \
+ RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | \
+ RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO)
uint64_t tx_offload = dev->data->dev_conf.txmode.offloads;
if (tx_offload & HNS3_DEV_TX_CSKUM_TSO_OFFLOAD_MASK)
@@ -307,7 +307,7 @@ struct hns3_rx_queue {
uint16_t rx_rearm_start; /* index of BD that driver re-arming from */
uint16_t rx_rearm_nb; /* number of remaining BDs to be re-armed */
- /* 4 if DEV_RX_OFFLOAD_KEEP_CRC offload set, 0 otherwise */
+ /* 4 if RTE_ETH_RX_OFFLOAD_KEEP_CRC offload set, 0 otherwise */
uint8_t crc_len;
/*
@@ -22,8 +22,8 @@ hns3_tx_check_vec_support(struct rte_eth_dev *dev)
if (hns3_dev_get_support(hw, PTP))
return -ENOTSUP;
- /* Only support DEV_TX_OFFLOAD_MBUF_FAST_FREE */
- if (txmode->offloads != DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ /* Only support RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE */
+ if (txmode->offloads != RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
return -ENOTSUP;
return 0;
@@ -228,10 +228,10 @@ hns3_rxq_vec_check(struct hns3_rx_queue *rxq, void *arg)
int
hns3_rx_check_vec_support(struct rte_eth_dev *dev)
{
- struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
+ struct rte_eth_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
- uint64_t offloads_mask = DEV_RX_OFFLOAD_TCP_LRO |
- DEV_RX_OFFLOAD_VLAN;
+ uint64_t offloads_mask = RTE_ETH_RX_OFFLOAD_TCP_LRO |
+ RTE_ETH_RX_OFFLOAD_VLAN;
struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
if (hns3_dev_get_support(hw, PTP))
@@ -1629,7 +1629,7 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
/* Set the global registers with default ether type value */
if (!pf->support_multi_driver) {
- ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
+ ret = i40e_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_OUTER,
RTE_ETHER_TYPE_VLAN);
if (ret != I40E_SUCCESS) {
PMD_INIT_LOG(ERR,
@@ -1896,8 +1896,8 @@ i40e_dev_configure(struct rte_eth_dev *dev)
ad->tx_simple_allowed = true;
ad->tx_vec_allowed = true;
- if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
- dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+ if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+ dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
/* Only legacy filter API needs the following fdir config. So when the
* legacy filter API is deprecated, the following codes should also be
@@ -1931,13 +1931,13 @@ i40e_dev_configure(struct rte_eth_dev *dev)
* number, which will be available after rx_queue_setup(). dev_start()
* function is good to place RSS setup.
*/
- if (mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
+ if (mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG) {
ret = i40e_vmdq_setup(dev);
if (ret)
goto err;
}
- if (mq_mode & ETH_MQ_RX_DCB_FLAG) {
+ if (mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) {
ret = i40e_dcb_setup(dev);
if (ret) {
PMD_DRV_LOG(ERR, "failed to configure DCB.");
@@ -2214,17 +2214,17 @@ i40e_parse_link_speeds(uint16_t link_speeds)
{
uint8_t link_speed = I40E_LINK_SPEED_UNKNOWN;
- if (link_speeds & ETH_LINK_SPEED_40G)
+ if (link_speeds & RTE_ETH_LINK_SPEED_40G)
link_speed |= I40E_LINK_SPEED_40GB;
- if (link_speeds & ETH_LINK_SPEED_25G)
+ if (link_speeds & RTE_ETH_LINK_SPEED_25G)
link_speed |= I40E_LINK_SPEED_25GB;
- if (link_speeds & ETH_LINK_SPEED_20G)
+ if (link_speeds & RTE_ETH_LINK_SPEED_20G)
link_speed |= I40E_LINK_SPEED_20GB;
- if (link_speeds & ETH_LINK_SPEED_10G)
+ if (link_speeds & RTE_ETH_LINK_SPEED_10G)
link_speed |= I40E_LINK_SPEED_10GB;
- if (link_speeds & ETH_LINK_SPEED_1G)
+ if (link_speeds & RTE_ETH_LINK_SPEED_1G)
link_speed |= I40E_LINK_SPEED_1GB;
- if (link_speeds & ETH_LINK_SPEED_100M)
+ if (link_speeds & RTE_ETH_LINK_SPEED_100M)
link_speed |= I40E_LINK_SPEED_100MB;
return link_speed;
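The RTE_ETH_LINK_SPEED_* values remain a bitmap, so several speeds can still be OR-ed into link_speeds. A sketch of the two usual configurations (port_conf as above; values are illustrative):

	/* Autonegotiate (the default; i40e then advertises all speeds). */
	port_conf.link_speeds = RTE_ETH_LINK_SPEED_AUTONEG;

	/* Or force a fixed 10G link, disabling autonegotiation. */
	port_conf.link_speeds = RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_FIXED;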
@@ -2332,13 +2332,13 @@ i40e_apply_link_speed(struct rte_eth_dev *dev)
abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK |
I40E_AQ_PHY_LINK_ENABLED;
- if (conf->link_speeds == ETH_LINK_SPEED_AUTONEG) {
- conf->link_speeds = ETH_LINK_SPEED_40G |
- ETH_LINK_SPEED_25G |
- ETH_LINK_SPEED_20G |
- ETH_LINK_SPEED_10G |
- ETH_LINK_SPEED_1G |
- ETH_LINK_SPEED_100M;
+ if (conf->link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
+ conf->link_speeds = RTE_ETH_LINK_SPEED_40G |
+ RTE_ETH_LINK_SPEED_25G |
+ RTE_ETH_LINK_SPEED_20G |
+ RTE_ETH_LINK_SPEED_10G |
+ RTE_ETH_LINK_SPEED_1G |
+ RTE_ETH_LINK_SPEED_100M;
abilities |= I40E_AQ_PHY_AN_ENABLED;
} else {
@@ -2876,34 +2876,34 @@ update_link_reg(struct i40e_hw *hw, struct rte_eth_link *link)
/* Parse the link status */
switch (link_speed) {
case I40E_REG_SPEED_0:
- link->link_speed = ETH_SPEED_NUM_100M;
+ link->link_speed = RTE_ETH_SPEED_NUM_100M;
break;
case I40E_REG_SPEED_1:
- link->link_speed = ETH_SPEED_NUM_1G;
+ link->link_speed = RTE_ETH_SPEED_NUM_1G;
break;
case I40E_REG_SPEED_2:
if (hw->mac.type == I40E_MAC_X722)
- link->link_speed = ETH_SPEED_NUM_2_5G;
+ link->link_speed = RTE_ETH_SPEED_NUM_2_5G;
else
- link->link_speed = ETH_SPEED_NUM_10G;
+ link->link_speed = RTE_ETH_SPEED_NUM_10G;
break;
case I40E_REG_SPEED_3:
if (hw->mac.type == I40E_MAC_X722) {
- link->link_speed = ETH_SPEED_NUM_5G;
+ link->link_speed = RTE_ETH_SPEED_NUM_5G;
} else {
reg_val = I40E_READ_REG(hw, I40E_PRTMAC_MACC);
if (reg_val & I40E_REG_MACC_25GB)
- link->link_speed = ETH_SPEED_NUM_25G;
+ link->link_speed = RTE_ETH_SPEED_NUM_25G;
else
- link->link_speed = ETH_SPEED_NUM_40G;
+ link->link_speed = RTE_ETH_SPEED_NUM_40G;
}
break;
case I40E_REG_SPEED_4:
if (hw->mac.type == I40E_MAC_X722)
- link->link_speed = ETH_SPEED_NUM_10G;
+ link->link_speed = RTE_ETH_SPEED_NUM_10G;
else
- link->link_speed = ETH_SPEED_NUM_20G;
+ link->link_speed = RTE_ETH_SPEED_NUM_20G;
break;
default:
PMD_DRV_LOG(ERR, "Unknown link speed info %u", link_speed);
@@ -2930,8 +2930,8 @@ update_link_aq(struct i40e_hw *hw, struct rte_eth_link *link,
status = i40e_aq_get_link_info(hw, enable_lse,
&link_status, NULL);
if (unlikely(status != I40E_SUCCESS)) {
- link->link_speed = ETH_SPEED_NUM_NONE;
- link->link_duplex = ETH_LINK_FULL_DUPLEX;
+ link->link_speed = RTE_ETH_SPEED_NUM_NONE;
+ link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
PMD_DRV_LOG(ERR, "Failed to get link info");
return;
}
@@ -2946,28 +2946,28 @@ update_link_aq(struct i40e_hw *hw, struct rte_eth_link *link,
/* Parse the link status */
switch (link_status.link_speed) {
case I40E_LINK_SPEED_100MB:
- link->link_speed = ETH_SPEED_NUM_100M;
+ link->link_speed = RTE_ETH_SPEED_NUM_100M;
break;
case I40E_LINK_SPEED_1GB:
- link->link_speed = ETH_SPEED_NUM_1G;
+ link->link_speed = RTE_ETH_SPEED_NUM_1G;
break;
case I40E_LINK_SPEED_10GB:
- link->link_speed = ETH_SPEED_NUM_10G;
+ link->link_speed = RTE_ETH_SPEED_NUM_10G;
break;
case I40E_LINK_SPEED_20GB:
- link->link_speed = ETH_SPEED_NUM_20G;
+ link->link_speed = RTE_ETH_SPEED_NUM_20G;
break;
case I40E_LINK_SPEED_25GB:
- link->link_speed = ETH_SPEED_NUM_25G;
+ link->link_speed = RTE_ETH_SPEED_NUM_25G;
break;
case I40E_LINK_SPEED_40GB:
- link->link_speed = ETH_SPEED_NUM_40G;
+ link->link_speed = RTE_ETH_SPEED_NUM_40G;
break;
default:
if (link->link_status)
- link->link_speed = ETH_SPEED_NUM_UNKNOWN;
+ link->link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
else
- link->link_speed = ETH_SPEED_NUM_NONE;
+ link->link_speed = RTE_ETH_SPEED_NUM_NONE;
break;
}
}
@@ -2984,9 +2984,9 @@ i40e_dev_link_update(struct rte_eth_dev *dev,
memset(&link, 0, sizeof(link));
/* i40e uses full duplex only */
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
link.link_autoneg = !(dev->data->dev_conf.link_speeds &
- ETH_LINK_SPEED_FIXED);
+ RTE_ETH_LINK_SPEED_FIXED);
if (!wait_to_complete && !enable_lse)
update_link_reg(hw, &link);
@@ -3720,33 +3720,33 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->min_mtu = RTE_ETHER_MIN_MTU;
dev_info->rx_queue_offload_capa = 0;
dev_info->rx_offload_capa =
- DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_QINQ_STRIP |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_KEEP_CRC |
- DEV_RX_OFFLOAD_SCATTER |
- DEV_RX_OFFLOAD_VLAN_EXTEND |
- DEV_RX_OFFLOAD_VLAN_FILTER |
- DEV_RX_OFFLOAD_RSS_HASH;
-
- dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+ RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+ RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_KEEP_CRC |
+ RTE_ETH_RX_OFFLOAD_SCATTER |
+ RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+ RTE_ETH_RX_OFFLOAD_RSS_HASH;
+
+ dev_info->tx_queue_offload_capa = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
dev_info->tx_offload_capa =
- DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_QINQ_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_SCTP_CKSUM |
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO |
- DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
- DEV_TX_OFFLOAD_GRE_TNL_TSO |
- DEV_TX_OFFLOAD_IPIP_TNL_TSO |
- DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
- DEV_TX_OFFLOAD_MULTI_SEGS |
+ RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+ RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_TSO |
+ RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
dev_info->tx_queue_offload_capa;
dev_info->dev_capa =
RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
@@ -3805,7 +3805,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
/* For XL710 */
- dev_info->speed_capa = ETH_LINK_SPEED_40G;
+ dev_info->speed_capa = RTE_ETH_LINK_SPEED_40G;
dev_info->default_rxportconf.nb_queues = 2;
dev_info->default_txportconf.nb_queues = 2;
if (dev->data->nb_rx_queues == 1)
@@ -3819,17 +3819,17 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
} else if (I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) {
/* For XXV710 */
- dev_info->speed_capa = ETH_LINK_SPEED_25G;
+ dev_info->speed_capa = RTE_ETH_LINK_SPEED_25G;
dev_info->default_rxportconf.nb_queues = 1;
dev_info->default_txportconf.nb_queues = 1;
dev_info->default_rxportconf.ring_size = 256;
dev_info->default_txportconf.ring_size = 256;
} else {
/* For X710 */
- dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
+ dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G;
dev_info->default_rxportconf.nb_queues = 1;
dev_info->default_txportconf.nb_queues = 1;
- if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_10G) {
+ if (dev->data->dev_conf.link_speeds & RTE_ETH_LINK_SPEED_10G) {
dev_info->default_rxportconf.ring_size = 512;
dev_info->default_txportconf.ring_size = 256;
} else {
@@ -3868,7 +3868,7 @@ i40e_vlan_tpid_set_by_registers(struct rte_eth_dev *dev,
int ret;
if (qinq) {
- if (vlan_type == ETH_VLAN_TYPE_OUTER)
+ if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER)
reg_id = 2;
}
@@ -3915,12 +3915,12 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev,
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
int qinq = dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_VLAN_EXTEND;
+ RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
int ret = 0;
- if ((vlan_type != ETH_VLAN_TYPE_INNER &&
- vlan_type != ETH_VLAN_TYPE_OUTER) ||
- (!qinq && vlan_type == ETH_VLAN_TYPE_INNER)) {
+ if ((vlan_type != RTE_ETH_VLAN_TYPE_INNER &&
+ vlan_type != RTE_ETH_VLAN_TYPE_OUTER) ||
+ (!qinq && vlan_type == RTE_ETH_VLAN_TYPE_INNER)) {
PMD_DRV_LOG(ERR,
"Unsupported vlan type.");
return -EINVAL;
@@ -3934,12 +3934,12 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev,
/* 802.1ad frames ability is added in NVM API 1.7*/
if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
if (qinq) {
- if (vlan_type == ETH_VLAN_TYPE_OUTER)
+ if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER)
hw->first_tag = rte_cpu_to_le_16(tpid);
- else if (vlan_type == ETH_VLAN_TYPE_INNER)
+ else if (vlan_type == RTE_ETH_VLAN_TYPE_INNER)
hw->second_tag = rte_cpu_to_le_16(tpid);
} else {
- if (vlan_type == ETH_VLAN_TYPE_OUTER)
+ if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER)
hw->second_tag = rte_cpu_to_le_16(tpid);
}
ret = i40e_aq_set_switch_config(hw, 0, 0, 0, NULL);
@@ -3998,37 +3998,37 @@ i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
struct rte_eth_rxmode *rxmode;
rxmode = &dev->data->dev_conf.rxmode;
- if (mask & ETH_VLAN_FILTER_MASK) {
- if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+ if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
i40e_vsi_config_vlan_filter(vsi, TRUE);
else
i40e_vsi_config_vlan_filter(vsi, FALSE);
}
- if (mask & ETH_VLAN_STRIP_MASK) {
+ if (mask & RTE_ETH_VLAN_STRIP_MASK) {
/* Enable or disable VLAN stripping */
- if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
i40e_vsi_config_vlan_stripping(vsi, TRUE);
else
i40e_vsi_config_vlan_stripping(vsi, FALSE);
}
- if (mask & ETH_VLAN_EXTEND_MASK) {
- if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
+ if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND) {
i40e_vsi_config_double_vlan(vsi, TRUE);
/* Set global registers with default ethertype. */
- i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
+ i40e_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_OUTER,
RTE_ETHER_TYPE_VLAN);
- i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER,
+ i40e_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_INNER,
RTE_ETHER_TYPE_VLAN);
}
else
i40e_vsi_config_double_vlan(vsi, FALSE);
}
- if (mask & ETH_QINQ_STRIP_MASK) {
+ if (mask & RTE_ETH_QINQ_STRIP_MASK) {
/* Enable or disable outer VLAN stripping */
- if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
i40e_vsi_config_outer_vlan_stripping(vsi, TRUE);
else
i40e_vsi_config_outer_vlan_stripping(vsi, FALSE);
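The same rename applies to the public helper that drives this callback. A sketch of enabling stripping and filtering together; for the four VLAN offloads this call manages, bits left out of the mask are turned off:

	int ret = rte_eth_dev_set_vlan_offload(port_id,
			RTE_ETH_VLAN_STRIP_OFFLOAD |
			RTE_ETH_VLAN_FILTER_OFFLOAD);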
@@ -4111,17 +4111,17 @@ i40e_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
/* Return current mode according to actual setting*/
switch (hw->fc.current_mode) {
case I40E_FC_FULL:
- fc_conf->mode = RTE_FC_FULL;
+ fc_conf->mode = RTE_ETH_FC_FULL;
break;
case I40E_FC_TX_PAUSE:
- fc_conf->mode = RTE_FC_TX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
break;
case I40E_FC_RX_PAUSE:
- fc_conf->mode = RTE_FC_RX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
break;
case I40E_FC_NONE:
default:
- fc_conf->mode = RTE_FC_NONE;
+ fc_conf->mode = RTE_ETH_FC_NONE;
};
return 0;
@@ -4137,10 +4137,10 @@ i40e_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
struct i40e_hw *hw;
struct i40e_pf *pf;
enum i40e_fc_mode rte_fcmode_2_i40e_fcmode[] = {
- [RTE_FC_NONE] = I40E_FC_NONE,
- [RTE_FC_RX_PAUSE] = I40E_FC_RX_PAUSE,
- [RTE_FC_TX_PAUSE] = I40E_FC_TX_PAUSE,
- [RTE_FC_FULL] = I40E_FC_FULL
+ [RTE_ETH_FC_NONE] = I40E_FC_NONE,
+ [RTE_ETH_FC_RX_PAUSE] = I40E_FC_RX_PAUSE,
+ [RTE_ETH_FC_TX_PAUSE] = I40E_FC_TX_PAUSE,
+ [RTE_ETH_FC_FULL] = I40E_FC_FULL
};
/* high_water field in the rte_eth_fc_conf using the kilobytes unit */
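A sketch of the matching application call under the new names, reading the current configuration first so only the mode changes (error handling trimmed; port_id assumed configured):

	struct rte_eth_fc_conf fc_conf;

	memset(&fc_conf, 0, sizeof(fc_conf));
	if (rte_eth_dev_flow_ctrl_get(port_id, &fc_conf) == 0) {
		fc_conf.mode = RTE_ETH_FC_FULL;	/* honor and send pause frames */
		int ret = rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
	}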
@@ -4287,7 +4287,7 @@ i40e_macaddr_add(struct rte_eth_dev *dev,
}
rte_memcpy(&mac_filter.mac_addr, mac_addr, RTE_ETHER_ADDR_LEN);
- if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
mac_filter.filter_type = I40E_MACVLAN_PERFECT_MATCH;
else
mac_filter.filter_type = I40E_MAC_PERFECT_MATCH;
@@ -4440,7 +4440,7 @@ i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
int ret;
if (reta_size != lut_size ||
- reta_size > ETH_RSS_RETA_SIZE_512) {
+ reta_size > RTE_ETH_RSS_RETA_SIZE_512) {
PMD_DRV_LOG(ERR,
"The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)",
reta_size, lut_size);
@@ -4456,8 +4456,8 @@ i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
if (ret)
goto out;
for (i = 0; i < reta_size; i++) {
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
if (reta_conf[idx].mask & (1ULL << shift))
lut[i] = reta_conf[idx].reta[shift];
}
@@ -4483,7 +4483,7 @@ i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
int ret;
if (reta_size != lut_size ||
- reta_size > ETH_RSS_RETA_SIZE_512) {
+ reta_size > RTE_ETH_RSS_RETA_SIZE_512) {
PMD_DRV_LOG(ERR,
"The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)",
reta_size, lut_size);
@@ -4500,8 +4500,8 @@ i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
if (ret)
goto out;
for (i = 0; i < reta_size; i++) {
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
if (reta_conf[idx].mask & (1ULL << shift))
reta_conf[idx].reta[shift] = lut[i];
}
@@ -4818,7 +4818,7 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev)
pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
hw->func_caps.num_vsis - vsi_count);
pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
- ETH_64_POOLS);
+ RTE_ETH_64_POOLS);
if (pf->max_nb_vmdq_vsi) {
pf->flags |= I40E_FLAG_VMDQ;
pf->vmdq_nb_qps = pf->vmdq_nb_qp_max;
@@ -6104,10 +6104,10 @@ i40e_dev_init_vlan(struct rte_eth_dev *dev)
int mask = 0;
/* Apply vlan offload setting */
- mask = ETH_VLAN_STRIP_MASK |
- ETH_QINQ_STRIP_MASK |
- ETH_VLAN_FILTER_MASK |
- ETH_VLAN_EXTEND_MASK;
+ mask = RTE_ETH_VLAN_STRIP_MASK |
+ RTE_ETH_QINQ_STRIP_MASK |
+ RTE_ETH_VLAN_FILTER_MASK |
+ RTE_ETH_VLAN_EXTEND_MASK;
ret = i40e_vlan_offload_set(dev, mask);
if (ret) {
PMD_DRV_LOG(INFO, "Failed to update vlan offload");
@@ -6236,9 +6236,9 @@ i40e_pf_setup(struct i40e_pf *pf)
/* Configure filter control */
memset(&settings, 0, sizeof(settings));
- if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_128)
+ if (hw->func_caps.rss_table_size == RTE_ETH_RSS_RETA_SIZE_128)
settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
- else if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_512)
+ else if (hw->func_caps.rss_table_size == RTE_ETH_RSS_RETA_SIZE_512)
settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
else {
PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported",
@@ -7098,7 +7098,7 @@ i40e_find_vlan_filter(struct i40e_vsi *vsi,
{
uint32_t vid_idx, vid_bit;
- if (vlan_id > ETH_VLAN_ID_MAX)
+ if (vlan_id > RTE_ETH_VLAN_ID_MAX)
return 0;
vid_idx = I40E_VFTA_IDX(vlan_id);
@@ -7133,7 +7133,7 @@ i40e_set_vlan_filter(struct i40e_vsi *vsi,
struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0};
int ret;
- if (vlan_id > ETH_VLAN_ID_MAX)
+ if (vlan_id > RTE_ETH_VLAN_ID_MAX)
return;
i40e_store_vlan_filter(vsi, vlan_id, on);
@@ -7727,25 +7727,25 @@ static int
i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
{
switch (filter_type) {
- case RTE_TUNNEL_FILTER_IMAC_IVLAN:
+ case RTE_ETH_TUNNEL_FILTER_IMAC_IVLAN:
*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
break;
- case RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID:
+ case RTE_ETH_TUNNEL_FILTER_IMAC_IVLAN_TENID:
*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
break;
- case RTE_TUNNEL_FILTER_IMAC_TENID:
+ case RTE_ETH_TUNNEL_FILTER_IMAC_TENID:
*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID;
break;
- case RTE_TUNNEL_FILTER_OMAC_TENID_IMAC:
+ case RTE_ETH_TUNNEL_FILTER_OMAC_TENID_IMAC:
*flag = I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC;
break;
- case ETH_TUNNEL_FILTER_IMAC:
+ case RTE_ETH_TUNNEL_FILTER_IMAC:
*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
break;
- case ETH_TUNNEL_FILTER_OIP:
+ case RTE_ETH_TUNNEL_FILTER_OIP:
*flag = I40E_AQC_ADD_CLOUD_FILTER_OIP;
break;
- case ETH_TUNNEL_FILTER_IIP:
+ case RTE_ETH_TUNNEL_FILTER_IIP:
*flag = I40E_AQC_ADD_CLOUD_FILTER_IIP;
break;
default:
@@ -8711,16 +8711,16 @@ i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
return -EINVAL;
switch (udp_tunnel->prot_type) {
- case RTE_TUNNEL_TYPE_VXLAN:
+ case RTE_ETH_TUNNEL_TYPE_VXLAN:
ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port,
I40E_AQC_TUNNEL_TYPE_VXLAN);
break;
- case RTE_TUNNEL_TYPE_VXLAN_GPE:
+ case RTE_ETH_TUNNEL_TYPE_VXLAN_GPE:
ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port,
I40E_AQC_TUNNEL_TYPE_VXLAN_GPE);
break;
- case RTE_TUNNEL_TYPE_GENEVE:
- case RTE_TUNNEL_TYPE_TEREDO:
+ case RTE_ETH_TUNNEL_TYPE_GENEVE:
+ case RTE_ETH_TUNNEL_TYPE_TEREDO:
PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
ret = -1;
break;
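A sketch of the application-side call with the renamed tunnel types (4790 is an arbitrary example port, not a recommendation):

	struct rte_eth_udp_tunnel tunnel = {
		.udp_port = 4790,
		.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN,
	};

	/* Teach the NIC to parse this UDP destination port as VXLAN. */
	int ret = rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);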
@@ -8746,12 +8746,12 @@ i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
return -EINVAL;
switch (udp_tunnel->prot_type) {
- case RTE_TUNNEL_TYPE_VXLAN:
- case RTE_TUNNEL_TYPE_VXLAN_GPE:
+ case RTE_ETH_TUNNEL_TYPE_VXLAN:
+ case RTE_ETH_TUNNEL_TYPE_VXLAN_GPE:
ret = i40e_del_vxlan_port(pf, udp_tunnel->udp_port);
break;
- case RTE_TUNNEL_TYPE_GENEVE:
- case RTE_TUNNEL_TYPE_TEREDO:
+ case RTE_ETH_TUNNEL_TYPE_GENEVE:
+ case RTE_ETH_TUNNEL_TYPE_TEREDO:
PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
ret = -1;
break;
@@ -8843,7 +8843,7 @@ int
i40e_pf_reset_rss_reta(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->adapter->hw;
- uint8_t lut[ETH_RSS_RETA_SIZE_512];
+ uint8_t lut[RTE_ETH_RSS_RETA_SIZE_512];
uint32_t i;
int num;
@@ -8851,7 +8851,7 @@ i40e_pf_reset_rss_reta(struct i40e_pf *pf)
* configured. It's necessary to calculate the actual PF
* queues that are configured.
*/
- if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
+ if (pf->dev_data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG)
num = i40e_pf_calc_configured_queues_num(pf);
else
num = pf->dev_data->nb_rx_queues;
@@ -8930,7 +8930,7 @@ i40e_pf_config_rss(struct i40e_pf *pf)
rss_hf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode;
if (!(rss_hf & pf->adapter->flow_types_mask) ||
- !(mq_mode & ETH_MQ_RX_RSS_FLAG))
+ !(mq_mode & RTE_ETH_MQ_RX_RSS_FLAG))
return 0;
hw = I40E_PF_TO_HW(pf);
@@ -10267,16 +10267,16 @@ i40e_start_timecounters(struct rte_eth_dev *dev)
rte_eth_linkstatus_get(dev, &link);
switch (link.link_speed) {
- case ETH_SPEED_NUM_40G:
- case ETH_SPEED_NUM_25G:
+ case RTE_ETH_SPEED_NUM_40G:
+ case RTE_ETH_SPEED_NUM_25G:
tsync_inc_l = I40E_PTP_40GB_INCVAL & 0xFFFFFFFF;
tsync_inc_h = I40E_PTP_40GB_INCVAL >> 32;
break;
- case ETH_SPEED_NUM_10G:
+ case RTE_ETH_SPEED_NUM_10G:
tsync_inc_l = I40E_PTP_10GB_INCVAL & 0xFFFFFFFF;
tsync_inc_h = I40E_PTP_10GB_INCVAL >> 32;
break;
- case ETH_SPEED_NUM_1G:
+ case RTE_ETH_SPEED_NUM_1G:
tsync_inc_l = I40E_PTP_1GB_INCVAL & 0xFFFFFFFF;
tsync_inc_h = I40E_PTP_1GB_INCVAL >> 32;
break;
@@ -10504,7 +10504,7 @@ i40e_parse_dcb_configure(struct rte_eth_dev *dev,
else
*tc_map = RTE_LEN2MASK(dcb_rx_conf->nb_tcs, uint8_t);
- if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
+ if (dev->data->dev_conf.dcb_capability_en & RTE_ETH_DCB_PFC_SUPPORT) {
dcb_cfg->pfc.willing = 0;
dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
dcb_cfg->pfc.pfcenable = *tc_map;
@@ -11012,7 +11012,7 @@ i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
uint16_t bsf, tc_mapping;
int i, j = 0;
- if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
+ if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_DCB_FLAG)
dcb_info->nb_tcs = rte_bsf32(vsi->enabled_tc + 1);
else
dcb_info->nb_tcs = 1;
@@ -11060,7 +11060,7 @@ i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
}
j++;
- } while (j < RTE_MIN(pf->nb_cfg_vmdq_vsi, ETH_MAX_VMDQ_POOL));
+ } while (j < RTE_MIN(pf->nb_cfg_vmdq_vsi, RTE_ETH_MAX_VMDQ_POOL));
return 0;
}
@@ -147,17 +147,17 @@ enum i40e_flxpld_layer_idx {
I40E_FLAG_RSS_AQ_CAPABLE)
#define I40E_RSS_OFFLOAD_ALL ( \
- ETH_RSS_FRAG_IPV4 | \
- ETH_RSS_NONFRAG_IPV4_TCP | \
- ETH_RSS_NONFRAG_IPV4_UDP | \
- ETH_RSS_NONFRAG_IPV4_SCTP | \
- ETH_RSS_NONFRAG_IPV4_OTHER | \
- ETH_RSS_FRAG_IPV6 | \
- ETH_RSS_NONFRAG_IPV6_TCP | \
- ETH_RSS_NONFRAG_IPV6_UDP | \
- ETH_RSS_NONFRAG_IPV6_SCTP | \
- ETH_RSS_NONFRAG_IPV6_OTHER | \
- ETH_RSS_L2_PAYLOAD)
+ RTE_ETH_RSS_FRAG_IPV4 | \
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
+ RTE_ETH_RSS_FRAG_IPV6 | \
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
+ RTE_ETH_RSS_L2_PAYLOAD)
/* All bits of RSS hash enable for X722*/
#define I40E_RSS_HENA_ALL_X722 ( \
@@ -1063,7 +1063,7 @@ struct i40e_rte_flow_rss_conf {
uint8_t key[(I40E_VFQF_HKEY_MAX_INDEX > I40E_PFQF_HKEY_MAX_INDEX ?
I40E_VFQF_HKEY_MAX_INDEX : I40E_PFQF_HKEY_MAX_INDEX + 1) *
sizeof(uint32_t)]; /**< Hash key. */
- uint16_t queue[ETH_RSS_RETA_SIZE_512]; /**< Queues indices to use. */
+ uint16_t queue[RTE_ETH_RSS_RETA_SIZE_512]; /**< Queues indices to use. */
bool symmetric_enable; /**< true, if enable symmetric */
uint64_t config_pctypes; /**< All PCTYPES with the flow */
@@ -2015,7 +2015,7 @@ i40e_get_outer_vlan(struct rte_eth_dev *dev)
{
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
int qinq = dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_VLAN_EXTEND;
+ RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
uint64_t reg_r = 0;
uint16_t reg_id;
uint16_t tpid;
@@ -3601,13 +3601,13 @@ i40e_flow_parse_l4_cloud_filter(struct rte_eth_dev *dev,
}
static uint16_t i40e_supported_tunnel_filter_types[] = {
- ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID |
- ETH_TUNNEL_FILTER_IVLAN,
- ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
- ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID,
- ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID |
- ETH_TUNNEL_FILTER_IMAC,
- ETH_TUNNEL_FILTER_IMAC,
+ RTE_ETH_TUNNEL_FILTER_IMAC | RTE_ETH_TUNNEL_FILTER_TENID |
+ RTE_ETH_TUNNEL_FILTER_IVLAN,
+ RTE_ETH_TUNNEL_FILTER_IMAC | RTE_ETH_TUNNEL_FILTER_IVLAN,
+ RTE_ETH_TUNNEL_FILTER_IMAC | RTE_ETH_TUNNEL_FILTER_TENID,
+ RTE_ETH_TUNNEL_FILTER_OMAC | RTE_ETH_TUNNEL_FILTER_TENID |
+ RTE_ETH_TUNNEL_FILTER_IMAC,
+ RTE_ETH_TUNNEL_FILTER_IMAC,
};
static int
@@ -3697,12 +3697,12 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
rte_memcpy(&filter->outer_mac,
ð_spec->dst,
RTE_ETHER_ADDR_LEN);
- filter_type |= ETH_TUNNEL_FILTER_OMAC;
+ filter_type |= RTE_ETH_TUNNEL_FILTER_OMAC;
} else {
rte_memcpy(&filter->inner_mac,
ð_spec->dst,
RTE_ETHER_ADDR_LEN);
- filter_type |= ETH_TUNNEL_FILTER_IMAC;
+ filter_type |= RTE_ETH_TUNNEL_FILTER_IMAC;
}
}
break;
@@ -3724,7 +3724,7 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
filter->inner_vlan =
rte_be_to_cpu_16(vlan_spec->tci) &
I40E_VLAN_TCI_MASK;
- filter_type |= ETH_TUNNEL_FILTER_IVLAN;
+ filter_type |= RTE_ETH_TUNNEL_FILTER_IVLAN;
}
break;
case RTE_FLOW_ITEM_TYPE_IPV4:
@@ -3798,7 +3798,7 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
vxlan_spec->vni, 3);
filter->tenant_id =
rte_be_to_cpu_32(tenant_id_be);
- filter_type |= ETH_TUNNEL_FILTER_TENID;
+ filter_type |= RTE_ETH_TUNNEL_FILTER_TENID;
}
vxlan_flag = 1;
@@ -3927,12 +3927,12 @@ i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
rte_memcpy(&filter->outer_mac,
ð_spec->dst,
RTE_ETHER_ADDR_LEN);
- filter_type |= ETH_TUNNEL_FILTER_OMAC;
+ filter_type |= RTE_ETH_TUNNEL_FILTER_OMAC;
} else {
rte_memcpy(&filter->inner_mac,
ð_spec->dst,
RTE_ETHER_ADDR_LEN);
- filter_type |= ETH_TUNNEL_FILTER_IMAC;
+ filter_type |= RTE_ETH_TUNNEL_FILTER_IMAC;
}
}
@@ -3955,7 +3955,7 @@ i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
filter->inner_vlan =
rte_be_to_cpu_16(vlan_spec->tci) &
I40E_VLAN_TCI_MASK;
- filter_type |= ETH_TUNNEL_FILTER_IVLAN;
+ filter_type |= RTE_ETH_TUNNEL_FILTER_IVLAN;
}
break;
case RTE_FLOW_ITEM_TYPE_IPV4:
@@ -4050,7 +4050,7 @@ i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
nvgre_spec->tni, 3);
filter->tenant_id =
rte_be_to_cpu_32(tenant_id_be);
- filter_type |= ETH_TUNNEL_FILTER_TENID;
+ filter_type |= RTE_ETH_TUNNEL_FILTER_TENID;
}
nvgre_flag = 1;
@@ -105,47 +105,47 @@ struct i40e_hash_map_rss_inset {
const struct i40e_hash_map_rss_inset i40e_hash_rss_inset[] = {
/* IPv4 */
- { ETH_RSS_IPV4, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST },
- { ETH_RSS_FRAG_IPV4, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST },
+ { RTE_ETH_RSS_IPV4, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST },
+ { RTE_ETH_RSS_FRAG_IPV4, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST },
- { ETH_RSS_NONFRAG_IPV4_OTHER,
+ { RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST },
- { ETH_RSS_NONFRAG_IPV4_TCP, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ { RTE_ETH_RSS_NONFRAG_IPV4_TCP, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT },
- { ETH_RSS_NONFRAG_IPV4_UDP, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ { RTE_ETH_RSS_NONFRAG_IPV4_UDP, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT },
- { ETH_RSS_NONFRAG_IPV4_SCTP, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ { RTE_ETH_RSS_NONFRAG_IPV4_SCTP, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT | I40E_INSET_SCTP_VT },
/* IPv6 */
- { ETH_RSS_IPV6, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST },
- { ETH_RSS_FRAG_IPV6, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST },
+ { RTE_ETH_RSS_IPV6, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST },
+ { RTE_ETH_RSS_FRAG_IPV6, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST },
- { ETH_RSS_NONFRAG_IPV6_OTHER,
+ { RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST },
- { ETH_RSS_NONFRAG_IPV6_TCP, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+ { RTE_ETH_RSS_NONFRAG_IPV6_TCP, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT },
- { ETH_RSS_NONFRAG_IPV6_UDP, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+ { RTE_ETH_RSS_NONFRAG_IPV6_UDP, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT },
- { ETH_RSS_NONFRAG_IPV6_SCTP, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+ { RTE_ETH_RSS_NONFRAG_IPV6_SCTP, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT | I40E_INSET_SCTP_VT },
/* Port */
- { ETH_RSS_PORT, I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT },
+ { RTE_ETH_RSS_PORT, I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT },
/* Ether */
- { ETH_RSS_L2_PAYLOAD, I40E_INSET_LAST_ETHER_TYPE },
- { ETH_RSS_ETH, I40E_INSET_DMAC | I40E_INSET_SMAC },
+ { RTE_ETH_RSS_L2_PAYLOAD, I40E_INSET_LAST_ETHER_TYPE },
+ { RTE_ETH_RSS_ETH, I40E_INSET_DMAC | I40E_INSET_SMAC },
/* VLAN */
- { ETH_RSS_S_VLAN, I40E_INSET_VLAN_OUTER },
- { ETH_RSS_C_VLAN, I40E_INSET_VLAN_INNER },
+ { RTE_ETH_RSS_S_VLAN, I40E_INSET_VLAN_OUTER },
+ { RTE_ETH_RSS_C_VLAN, I40E_INSET_VLAN_INNER },
};
#define I40E_HASH_VOID_NEXT_ALLOW BIT_ULL(RTE_FLOW_ITEM_TYPE_ETH)
@@ -208,30 +208,30 @@ struct i40e_hash_match_pattern {
#define I40E_HASH_MAP_CUS_PATTERN(pattern, rss_mask, cus_pctype) { \
pattern, rss_mask, true, cus_pctype }
-#define I40E_HASH_L2_RSS_MASK (ETH_RSS_VLAN | ETH_RSS_ETH | \
- ETH_RSS_L2_SRC_ONLY | \
- ETH_RSS_L2_DST_ONLY)
+#define I40E_HASH_L2_RSS_MASK (RTE_ETH_RSS_VLAN | RTE_ETH_RSS_ETH | \
+ RTE_ETH_RSS_L2_SRC_ONLY | \
+ RTE_ETH_RSS_L2_DST_ONLY)
#define I40E_HASH_L23_RSS_MASK (I40E_HASH_L2_RSS_MASK | \
- ETH_RSS_L3_SRC_ONLY | \
- ETH_RSS_L3_DST_ONLY)
+ RTE_ETH_RSS_L3_SRC_ONLY | \
+ RTE_ETH_RSS_L3_DST_ONLY)
-#define I40E_HASH_IPV4_L23_RSS_MASK (ETH_RSS_IPV4 | I40E_HASH_L23_RSS_MASK)
-#define I40E_HASH_IPV6_L23_RSS_MASK (ETH_RSS_IPV6 | I40E_HASH_L23_RSS_MASK)
+#define I40E_HASH_IPV4_L23_RSS_MASK (RTE_ETH_RSS_IPV4 | I40E_HASH_L23_RSS_MASK)
+#define I40E_HASH_IPV6_L23_RSS_MASK (RTE_ETH_RSS_IPV6 | I40E_HASH_L23_RSS_MASK)
#define I40E_HASH_L234_RSS_MASK (I40E_HASH_L23_RSS_MASK | \
- ETH_RSS_PORT | ETH_RSS_L4_SRC_ONLY | \
- ETH_RSS_L4_DST_ONLY)
+ RTE_ETH_RSS_PORT | RTE_ETH_RSS_L4_SRC_ONLY | \
+ RTE_ETH_RSS_L4_DST_ONLY)
-#define I40E_HASH_IPV4_L234_RSS_MASK (I40E_HASH_L234_RSS_MASK | ETH_RSS_IPV4)
-#define I40E_HASH_IPV6_L234_RSS_MASK (I40E_HASH_L234_RSS_MASK | ETH_RSS_IPV6)
+#define I40E_HASH_IPV4_L234_RSS_MASK (I40E_HASH_L234_RSS_MASK | RTE_ETH_RSS_IPV4)
+#define I40E_HASH_IPV6_L234_RSS_MASK (I40E_HASH_L234_RSS_MASK | RTE_ETH_RSS_IPV6)
-#define I40E_HASH_L4_TYPES (ETH_RSS_NONFRAG_IPV4_TCP | \
- ETH_RSS_NONFRAG_IPV4_UDP | \
- ETH_RSS_NONFRAG_IPV4_SCTP | \
- ETH_RSS_NONFRAG_IPV6_TCP | \
- ETH_RSS_NONFRAG_IPV6_UDP | \
- ETH_RSS_NONFRAG_IPV6_SCTP)
+#define I40E_HASH_L4_TYPES (RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
/* Currently supported patterns and RSS types.
* All items that have the same pattern types are together.
@@ -239,72 +239,72 @@ struct i40e_hash_match_pattern {
static const struct i40e_hash_match_pattern match_patterns[] = {
/* Ether */
I40E_HASH_MAP_PATTERN(I40E_PHINT_ETH,
- ETH_RSS_L2_PAYLOAD | I40E_HASH_L2_RSS_MASK,
+ RTE_ETH_RSS_L2_PAYLOAD | I40E_HASH_L2_RSS_MASK,
I40E_FILTER_PCTYPE_L2_PAYLOAD),
/* IPv4 */
I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV4,
- ETH_RSS_FRAG_IPV4 | I40E_HASH_IPV4_L23_RSS_MASK,
+ RTE_ETH_RSS_FRAG_IPV4 | I40E_HASH_IPV4_L23_RSS_MASK,
I40E_FILTER_PCTYPE_FRAG_IPV4),
I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV4,
- ETH_RSS_NONFRAG_IPV4_OTHER |
+ RTE_ETH_RSS_NONFRAG_IPV4_OTHER |
I40E_HASH_IPV4_L23_RSS_MASK,
I40E_FILTER_PCTYPE_NONF_IPV4_OTHER),
I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV4_TCP,
- ETH_RSS_NONFRAG_IPV4_TCP |
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP |
I40E_HASH_IPV4_L234_RSS_MASK,
I40E_FILTER_PCTYPE_NONF_IPV4_TCP),
I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV4_UDP,
- ETH_RSS_NONFRAG_IPV4_UDP |
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP |
I40E_HASH_IPV4_L234_RSS_MASK,
I40E_FILTER_PCTYPE_NONF_IPV4_UDP),
I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV4_SCTP,
- ETH_RSS_NONFRAG_IPV4_SCTP |
+ RTE_ETH_RSS_NONFRAG_IPV4_SCTP |
I40E_HASH_IPV4_L234_RSS_MASK,
I40E_FILTER_PCTYPE_NONF_IPV4_SCTP),
/* IPv6 */
I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV6,
- ETH_RSS_FRAG_IPV6 | I40E_HASH_IPV6_L23_RSS_MASK,
+ RTE_ETH_RSS_FRAG_IPV6 | I40E_HASH_IPV6_L23_RSS_MASK,
I40E_FILTER_PCTYPE_FRAG_IPV6),
I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV6,
- ETH_RSS_NONFRAG_IPV6_OTHER |
+ RTE_ETH_RSS_NONFRAG_IPV6_OTHER |
I40E_HASH_IPV6_L23_RSS_MASK,
I40E_FILTER_PCTYPE_NONF_IPV6_OTHER),
I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV6_FRAG,
- ETH_RSS_FRAG_IPV6 | I40E_HASH_L23_RSS_MASK,
+ RTE_ETH_RSS_FRAG_IPV6 | I40E_HASH_L23_RSS_MASK,
I40E_FILTER_PCTYPE_FRAG_IPV6),
I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV6_TCP,
- ETH_RSS_NONFRAG_IPV6_TCP |
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP |
I40E_HASH_IPV6_L234_RSS_MASK,
I40E_FILTER_PCTYPE_NONF_IPV6_TCP),
I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV6_UDP,
- ETH_RSS_NONFRAG_IPV6_UDP |
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP |
I40E_HASH_IPV6_L234_RSS_MASK,
I40E_FILTER_PCTYPE_NONF_IPV6_UDP),
I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV6_SCTP,
- ETH_RSS_NONFRAG_IPV6_SCTP |
+ RTE_ETH_RSS_NONFRAG_IPV6_SCTP |
I40E_HASH_IPV6_L234_RSS_MASK,
I40E_FILTER_PCTYPE_NONF_IPV6_SCTP),
/* ESP */
I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_ESP,
- ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV4),
+ RTE_ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV4),
I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_ESP,
- ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV6),
+ RTE_ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV6),
I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_UDP_ESP,
- ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV4_UDP),
+ RTE_ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV4_UDP),
I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_UDP_ESP,
- ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV6_UDP),
+ RTE_ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV6_UDP),
/* GTPC */
I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_GTPC,
@@ -319,27 +319,27 @@ static const struct i40e_hash_match_pattern match_patterns[] = {
I40E_HASH_IPV4_L234_RSS_MASK,
I40E_CUSTOMIZED_GTPU),
I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_GTPU_IPV4,
- ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV4),
+ RTE_ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV4),
I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_GTPU_IPV6,
- ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV6),
+ RTE_ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV6),
I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_GTPU,
I40E_HASH_IPV6_L234_RSS_MASK,
I40E_CUSTOMIZED_GTPU),
I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_GTPU_IPV4,
- ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV4),
+ RTE_ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV4),
I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_GTPU_IPV6,
- ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV6),
+ RTE_ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV6),
/* L2TPV3 */
I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_L2TPV3,
- ETH_RSS_L2TPV3, I40E_CUSTOMIZED_IPV4_L2TPV3),
+ RTE_ETH_RSS_L2TPV3, I40E_CUSTOMIZED_IPV4_L2TPV3),
I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_L2TPV3,
- ETH_RSS_L2TPV3, I40E_CUSTOMIZED_IPV6_L2TPV3),
+ RTE_ETH_RSS_L2TPV3, I40E_CUSTOMIZED_IPV6_L2TPV3),
/* AH */
- I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_AH, ETH_RSS_AH,
+ I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_AH, RTE_ETH_RSS_AH,
I40E_CUSTOMIZED_AH_IPV4),
- I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_AH, ETH_RSS_AH,
+ I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_AH, RTE_ETH_RSS_AH,
I40E_CUSTOMIZED_AH_IPV6),
};
@@ -575,29 +575,29 @@ i40e_hash_get_inset(uint64_t rss_types)
/* If SRC_ONLY and DST_ONLY of the same level are used simultaneously,
* the effect is the same as if neither had been set.
*/
- mask = rss_types & (ETH_RSS_L2_SRC_ONLY | ETH_RSS_L2_DST_ONLY);
- if (mask == ETH_RSS_L2_SRC_ONLY)
+ mask = rss_types & (RTE_ETH_RSS_L2_SRC_ONLY | RTE_ETH_RSS_L2_DST_ONLY);
+ if (mask == RTE_ETH_RSS_L2_SRC_ONLY)
inset &= ~I40E_INSET_DMAC;
- else if (mask == ETH_RSS_L2_DST_ONLY)
+ else if (mask == RTE_ETH_RSS_L2_DST_ONLY)
inset &= ~I40E_INSET_SMAC;
- mask = rss_types & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY);
- if (mask == ETH_RSS_L3_SRC_ONLY)
+ mask = rss_types & (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY);
+ if (mask == RTE_ETH_RSS_L3_SRC_ONLY)
inset &= ~(I40E_INSET_IPV4_DST | I40E_INSET_IPV6_DST);
- else if (mask == ETH_RSS_L3_DST_ONLY)
+ else if (mask == RTE_ETH_RSS_L3_DST_ONLY)
inset &= ~(I40E_INSET_IPV4_SRC | I40E_INSET_IPV6_SRC);
- mask = rss_types & (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY);
- if (mask == ETH_RSS_L4_SRC_ONLY)
+ mask = rss_types & (RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY);
+ if (mask == RTE_ETH_RSS_L4_SRC_ONLY)
inset &= ~I40E_INSET_DST_PORT;
- else if (mask == ETH_RSS_L4_DST_ONLY)
+ else if (mask == RTE_ETH_RSS_L4_DST_ONLY)
inset &= ~I40E_INSET_SRC_PORT;
if (rss_types & I40E_HASH_L4_TYPES) {
uint64_t l3_mask = rss_types &
- (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY);
+ (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY);
uint64_t l4_mask = rss_types &
- (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY);
+ (RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY);
if (l3_mask && !l4_mask)
inset &= ~(I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT);
@@ -836,7 +836,7 @@ i40e_hash_config(struct i40e_pf *pf,
/* Update lookup table */
if (rss_info->queue_num > 0) {
- uint8_t lut[ETH_RSS_RETA_SIZE_512];
+ uint8_t lut[RTE_ETH_RSS_RETA_SIZE_512];
uint32_t i, j = 0;
for (i = 0; i < hw->func_caps.rss_table_size; i++) {
@@ -943,7 +943,7 @@ i40e_hash_parse_queues(const struct rte_eth_dev *dev,
"RSS key is ignored when queues specified");
pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
- if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
+ if (pf->dev_data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG)
max_queue = i40e_pf_calc_configured_queues_num(pf);
else
max_queue = pf->dev_data->nb_rx_queues;
@@ -1081,22 +1081,22 @@ i40e_hash_validate_rss_types(uint64_t rss_types)
uint64_t type, mask;
/* Validate L2 */
- type = ETH_RSS_ETH & rss_types;
- mask = (ETH_RSS_L2_SRC_ONLY | ETH_RSS_L2_DST_ONLY) & rss_types;
+ type = RTE_ETH_RSS_ETH & rss_types;
+ mask = (RTE_ETH_RSS_L2_SRC_ONLY | RTE_ETH_RSS_L2_DST_ONLY) & rss_types;
if (!type && mask)
return false;
/* Validate L3 */
- type = (I40E_HASH_L4_TYPES | ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
- ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_IPV6 |
- ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER) & rss_types;
- mask = (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY) & rss_types;
+ type = (I40E_HASH_L4_TYPES | RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
+ RTE_ETH_RSS_NONFRAG_IPV4_OTHER | RTE_ETH_RSS_IPV6 |
+ RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_OTHER) & rss_types;
+ mask = (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY) & rss_types;
if (!type && mask)
return false;
/* Validate L4 */
- type = (I40E_HASH_L4_TYPES | ETH_RSS_PORT) & rss_types;
- mask = (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY) & rss_types;
+ type = (I40E_HASH_L4_TYPES | RTE_ETH_RSS_PORT) & rss_types;
+ mask = (RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY) & rss_types;
if (!type && mask)
return false;
@@ -1207,24 +1207,24 @@ i40e_notify_vf_link_status(struct rte_eth_dev *dev, struct i40e_pf_vf *vf)
event.event_data.link_event.link_status =
dev->data->dev_link.link_status;
- /* need to convert the ETH_SPEED_xxx into VIRTCHNL_LINK_SPEED_xxx */
+ /* need to convert RTE_ETH_SPEED_NUM_xxx into VIRTCHNL_LINK_SPEED_xxx */
switch (dev->data->dev_link.link_speed) {
- case ETH_SPEED_NUM_100M:
+ case RTE_ETH_SPEED_NUM_100M:
event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_100MB;
break;
- case ETH_SPEED_NUM_1G:
+ case RTE_ETH_SPEED_NUM_1G:
event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_1GB;
break;
- case ETH_SPEED_NUM_10G:
+ case RTE_ETH_SPEED_NUM_10G:
event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_10GB;
break;
- case ETH_SPEED_NUM_20G:
+ case RTE_ETH_SPEED_NUM_20G:
event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_20GB;
break;
- case ETH_SPEED_NUM_25G:
+ case RTE_ETH_SPEED_NUM_25G:
event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_25GB;
break;
- case ETH_SPEED_NUM_40G:
+ case RTE_ETH_SPEED_NUM_40G:
event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_40GB;
break;
default:
@@ -1329,7 +1329,7 @@ i40e_tx_free_bufs(struct i40e_tx_queue *txq)
for (i = 0; i < tx_rs_thresh; i++)
rte_prefetch0((txep + i)->mbuf);
- if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
+ if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) {
if (k) {
for (j = 0; j != k; j += RTE_I40E_TX_MAX_FREE_BUF_SZ) {
for (i = 0; i < RTE_I40E_TX_MAX_FREE_BUF_SZ; ++i, ++txep) {
@@ -1995,7 +1995,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
rxq->queue_id = queue_idx;
rxq->reg_idx = reg_idx;
rxq->port_id = dev->data->port_id;
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+ if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
rxq->crc_len = RTE_ETHER_CRC_LEN;
else
rxq->crc_len = 0;
@@ -2243,7 +2243,7 @@ i40e_dev_tx_queue_setup_runtime(struct rte_eth_dev *dev,
}
/* check simple tx conflict */
if (ad->tx_simple_allowed) {
- if ((txq->offloads & ~DEV_TX_OFFLOAD_MBUF_FAST_FREE) != 0 ||
+ if ((txq->offloads & ~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) != 0 ||
txq->tx_rs_thresh < RTE_PMD_I40E_TX_MAX_BURST) {
PMD_DRV_LOG(ERR, "No-simple tx is required.");
return -EINVAL;
@@ -3417,7 +3417,7 @@ i40e_set_tx_function_flag(struct rte_eth_dev *dev, struct i40e_tx_queue *txq)
/* Use a simple Tx queue if possible (only fast free is allowed) */
ad->tx_simple_allowed =
(txq->offloads ==
- (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
+ (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) &&
txq->tx_rs_thresh >= RTE_PMD_I40E_TX_MAX_BURST);
ad->tx_vec_allowed = (ad->tx_simple_allowed &&
txq->tx_rs_thresh <= RTE_I40E_TX_MAX_FREE_BUF_SZ);
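The subset test offloads == (offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE), used here and in the hns3 hunk above, is true exactly when no Tx offload other than fast free is enabled. Applications opt in at configure time, under the documented constraint that every mbuf sent on the queue comes from a single mempool and has a reference count of 1; a sketch (dev_info from rte_eth_dev_info_get(), port_conf as above):

	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
		port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;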
@@ -120,7 +120,7 @@ struct i40e_rx_queue {
bool rx_deferred_start; /**< don't start this queue in dev start */
uint16_t rx_using_sse; /**<flag indicate the usage of vPMD for rx */
uint8_t dcb_tc; /**< Traffic class of rx queue */
- uint64_t offloads; /**< Rx offload flags of DEV_RX_OFFLOAD_* */
+ uint64_t offloads; /**< Rx offload flags of RTE_ETH_RX_OFFLOAD_* */
const struct rte_memzone *mz;
};
@@ -166,7 +166,7 @@ struct i40e_tx_queue {
bool q_set; /**< indicate if tx queue has been configured */
bool tx_deferred_start; /**< don't start this queue in dev start */
uint8_t dcb_tc; /**< Traffic class of tx queue */
- uint64_t offloads; /**< Tx offload flags of DEV_RX_OFFLOAD_* */
+ uint64_t offloads; /**< Tx offload flags of RTE_ETH_TX_OFFLOAD_* */
const struct rte_memzone *mz;
};
@@ -900,7 +900,7 @@ i40e_tx_free_bufs_avx512(struct i40e_tx_queue *txq)
txep = (void *)txq->sw_ring;
txep += txq->tx_next_dd - (n - 1);
- if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
+ if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
struct rte_mempool *mp = txep[0].mbuf->pool;
void **cache_objs;
struct rte_mempool_cache *cache = rte_mempool_default_cache(mp,
@@ -100,7 +100,7 @@ i40e_tx_free_bufs(struct i40e_tx_queue *txq)
*/
txep = &txq->sw_ring[txq->tx_next_dd - (n - 1)];
- if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
+ if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) {
for (i = 0; i < n; i++) {
free[i] = txep[i].mbuf;
txep[i].mbuf = NULL;
@@ -211,7 +211,7 @@ i40e_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
struct i40e_adapter *ad =
I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
- struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
+ struct rte_eth_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
struct i40e_rx_queue *rxq;
uint16_t desc, i;
bool first_queue;
@@ -221,11 +221,11 @@ i40e_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
return -1;
/* no header split support */
- if (rxmode->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT)
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_HEADER_SPLIT)
return -1;
/* no QinQ support */
- if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
return -1;
/**
@@ -42,30 +42,30 @@ i40e_vf_representor_dev_infos_get(struct rte_eth_dev *ethdev,
dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
dev_info->hash_key_size = (I40E_VFQF_HKEY_MAX_INDEX + 1) *
sizeof(uint32_t);
- dev_info->reta_size = ETH_RSS_RETA_SIZE_64;
+ dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_64;
dev_info->flow_type_rss_offloads = I40E_RSS_OFFLOAD_ALL;
dev_info->max_mac_addrs = I40E_NUM_MACADDR_MAX;
dev_info->rx_offload_capa =
- DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_QINQ_STRIP |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_VLAN_FILTER;
+ RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+ RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+ RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
dev_info->tx_offload_capa =
- DEV_TX_OFFLOAD_MULTI_SEGS |
- DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_QINQ_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_SCTP_CKSUM |
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO |
- DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
- DEV_TX_OFFLOAD_GRE_TNL_TSO |
- DEV_TX_OFFLOAD_IPIP_TNL_TSO |
- DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+ RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+ RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_TSO |
+ RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO;
dev_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_thresh = {
@@ -385,19 +385,19 @@ i40e_vf_representor_vlan_offload_set(struct rte_eth_dev *ethdev, int mask)
return -EINVAL;
}
- if (mask & ETH_VLAN_FILTER_MASK) {
+ if (mask & RTE_ETH_VLAN_FILTER_MASK) {
/* Enable or disable VLAN filtering offload */
if (ethdev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_VLAN_FILTER)
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
return i40e_vsi_config_vlan_filter(vsi, TRUE);
else
return i40e_vsi_config_vlan_filter(vsi, FALSE);
}
- if (mask & ETH_VLAN_STRIP_MASK) {
+ if (mask & RTE_ETH_VLAN_STRIP_MASK) {
/* Enable or disable VLAN stripping offload */
if (ethdev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_VLAN_STRIP)
+ RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
return i40e_vsi_config_vlan_stripping(vsi, TRUE);
else
return i40e_vsi_config_vlan_stripping(vsi, FALSE);
@@ -50,18 +50,18 @@
VIRTCHNL_VF_OFFLOAD_RX_POLLING)
#define IAVF_RSS_OFFLOAD_ALL ( \
- ETH_RSS_IPV4 | \
- ETH_RSS_FRAG_IPV4 | \
- ETH_RSS_NONFRAG_IPV4_TCP | \
- ETH_RSS_NONFRAG_IPV4_UDP | \
- ETH_RSS_NONFRAG_IPV4_SCTP | \
- ETH_RSS_NONFRAG_IPV4_OTHER | \
- ETH_RSS_IPV6 | \
- ETH_RSS_FRAG_IPV6 | \
- ETH_RSS_NONFRAG_IPV6_TCP | \
- ETH_RSS_NONFRAG_IPV6_UDP | \
- ETH_RSS_NONFRAG_IPV6_SCTP | \
- ETH_RSS_NONFRAG_IPV6_OTHER)
+ RTE_ETH_RSS_IPV4 | \
+ RTE_ETH_RSS_FRAG_IPV4 | \
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
+ RTE_ETH_RSS_IPV6 | \
+ RTE_ETH_RSS_FRAG_IPV6 | \
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_OTHER)
#define IAVF_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET
#define IAVF_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET
@@ -266,53 +266,53 @@ iavf_config_rss_hf(struct iavf_adapter *adapter, uint64_t rss_hf)
static const uint64_t map_hena_rss[] = {
/* IPv4 */
[IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
- ETH_RSS_NONFRAG_IPV4_UDP,
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP,
[IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
- ETH_RSS_NONFRAG_IPV4_UDP,
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP,
[IAVF_FILTER_PCTYPE_NONF_IPV4_UDP] =
- ETH_RSS_NONFRAG_IPV4_UDP,
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP,
[IAVF_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
- ETH_RSS_NONFRAG_IPV4_TCP,
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP,
[IAVF_FILTER_PCTYPE_NONF_IPV4_TCP] =
- ETH_RSS_NONFRAG_IPV4_TCP,
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP,
[IAVF_FILTER_PCTYPE_NONF_IPV4_SCTP] =
- ETH_RSS_NONFRAG_IPV4_SCTP,
+ RTE_ETH_RSS_NONFRAG_IPV4_SCTP,
[IAVF_FILTER_PCTYPE_NONF_IPV4_OTHER] =
- ETH_RSS_NONFRAG_IPV4_OTHER,
- [IAVF_FILTER_PCTYPE_FRAG_IPV4] = ETH_RSS_FRAG_IPV4,
+ RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
+ [IAVF_FILTER_PCTYPE_FRAG_IPV4] = RTE_ETH_RSS_FRAG_IPV4,
/* IPv6 */
[IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
- ETH_RSS_NONFRAG_IPV6_UDP,
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP,
[IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
- ETH_RSS_NONFRAG_IPV6_UDP,
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP,
[IAVF_FILTER_PCTYPE_NONF_IPV6_UDP] =
- ETH_RSS_NONFRAG_IPV6_UDP,
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP,
[IAVF_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
- ETH_RSS_NONFRAG_IPV6_TCP,
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP,
[IAVF_FILTER_PCTYPE_NONF_IPV6_TCP] =
- ETH_RSS_NONFRAG_IPV6_TCP,
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP,
[IAVF_FILTER_PCTYPE_NONF_IPV6_SCTP] =
- ETH_RSS_NONFRAG_IPV6_SCTP,
+ RTE_ETH_RSS_NONFRAG_IPV6_SCTP,
[IAVF_FILTER_PCTYPE_NONF_IPV6_OTHER] =
- ETH_RSS_NONFRAG_IPV6_OTHER,
- [IAVF_FILTER_PCTYPE_FRAG_IPV6] = ETH_RSS_FRAG_IPV6,
+ RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
+ [IAVF_FILTER_PCTYPE_FRAG_IPV6] = RTE_ETH_RSS_FRAG_IPV6,
/* L2 Payload */
- [IAVF_FILTER_PCTYPE_L2_PAYLOAD] = ETH_RSS_L2_PAYLOAD
+ [IAVF_FILTER_PCTYPE_L2_PAYLOAD] = RTE_ETH_RSS_L2_PAYLOAD
};
- const uint64_t ipv4_rss = ETH_RSS_NONFRAG_IPV4_UDP |
- ETH_RSS_NONFRAG_IPV4_TCP |
- ETH_RSS_NONFRAG_IPV4_SCTP |
- ETH_RSS_NONFRAG_IPV4_OTHER |
- ETH_RSS_FRAG_IPV4;
+ const uint64_t ipv4_rss = RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+ RTE_ETH_RSS_NONFRAG_IPV4_SCTP |
+ RTE_ETH_RSS_NONFRAG_IPV4_OTHER |
+ RTE_ETH_RSS_FRAG_IPV4;
- const uint64_t ipv6_rss = ETH_RSS_NONFRAG_IPV6_UDP |
- ETH_RSS_NONFRAG_IPV6_TCP |
- ETH_RSS_NONFRAG_IPV6_SCTP |
- ETH_RSS_NONFRAG_IPV6_OTHER |
- ETH_RSS_FRAG_IPV6;
+ const uint64_t ipv6_rss = RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+ RTE_ETH_RSS_NONFRAG_IPV6_SCTP |
+ RTE_ETH_RSS_NONFRAG_IPV6_OTHER |
+ RTE_ETH_RSS_FRAG_IPV6;
struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
uint64_t caps = 0, hena = 0, valid_rss_hf = 0;
@@ -331,13 +331,13 @@ iavf_config_rss_hf(struct iavf_adapter *adapter, uint64_t rss_hf)
}
/**
- * ETH_RSS_IPV4 and ETH_RSS_IPV6 can be considered as 2
+ * RTE_ETH_RSS_IPV4 and RTE_ETH_RSS_IPV6 can be considered as 2
* generalizations of all other IPv4 and IPv6 RSS types.
*/
- if (rss_hf & ETH_RSS_IPV4)
+ if (rss_hf & RTE_ETH_RSS_IPV4)
rss_hf |= ipv4_rss;
- if (rss_hf & ETH_RSS_IPV6)
+ if (rss_hf & RTE_ETH_RSS_IPV6)
rss_hf |= ipv6_rss;
RTE_BUILD_BUG_ON(RTE_DIM(map_hena_rss) > sizeof(uint64_t) * CHAR_BIT);
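The comment in this hunk captures the driver's expansion rule: the generic RTE_ETH_RSS_IPV4/RTE_ETH_RSS_IPV6 bits imply every narrower type of the same family. A minimal standalone sketch of the IPv4 half under the new names (the helper name is illustrative, not part of this patch; the IPv6 case is symmetric):

    #include <stdint.h>
    #include <rte_ethdev.h>

    /* Mirror of the driver's rule: the generic IPv4 bit pulls in all
     * narrower IPv4 RSS types before the per-type lookup runs. */
    static uint64_t
    expand_ipv4_rss(uint64_t rss_hf)
    {
            const uint64_t ipv4_rss = RTE_ETH_RSS_NONFRAG_IPV4_UDP |
                                      RTE_ETH_RSS_NONFRAG_IPV4_TCP |
                                      RTE_ETH_RSS_NONFRAG_IPV4_SCTP |
                                      RTE_ETH_RSS_NONFRAG_IPV4_OTHER |
                                      RTE_ETH_RSS_FRAG_IPV4;

            if (rss_hf & RTE_ETH_RSS_IPV4)
                    rss_hf |= ipv4_rss;
            return rss_hf;
    }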
@@ -363,10 +363,10 @@ iavf_config_rss_hf(struct iavf_adapter *adapter, uint64_t rss_hf)
}
if (valid_rss_hf & ipv4_rss)
- valid_rss_hf |= rss_hf & ETH_RSS_IPV4;
+ valid_rss_hf |= rss_hf & RTE_ETH_RSS_IPV4;
if (valid_rss_hf & ipv6_rss)
- valid_rss_hf |= rss_hf & ETH_RSS_IPV6;
+ valid_rss_hf |= rss_hf & RTE_ETH_RSS_IPV6;
if (rss_hf & ~valid_rss_hf)
PMD_DRV_LOG(WARNING, "Unsupported rss_hf 0x%" PRIx64,
@@ -467,7 +467,7 @@ iavf_dev_vlan_insert_set(struct rte_eth_dev *dev)
return 0;
enable = !!(dev->data->dev_conf.txmode.offloads &
- DEV_TX_OFFLOAD_VLAN_INSERT);
+ RTE_ETH_TX_OFFLOAD_VLAN_INSERT);
iavf_config_vlan_insert_v2(adapter, enable);
return 0;
@@ -479,10 +479,10 @@ iavf_dev_init_vlan(struct rte_eth_dev *dev)
int err;
err = iavf_dev_vlan_offload_set(dev,
- ETH_VLAN_STRIP_MASK |
- ETH_QINQ_STRIP_MASK |
- ETH_VLAN_FILTER_MASK |
- ETH_VLAN_EXTEND_MASK);
+ RTE_ETH_VLAN_STRIP_MASK |
+ RTE_ETH_QINQ_STRIP_MASK |
+ RTE_ETH_VLAN_FILTER_MASK |
+ RTE_ETH_VLAN_EXTEND_MASK);
if (err) {
PMD_DRV_LOG(ERR, "Failed to update vlan offload");
return err;
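For reference, an application reaches the same PMD callback through rte_eth_dev_set_vlan_offload(), where the ethdev layer derives the renamed *_MASK bits; a hedged sketch (the helper name and port handling are placeholders, not part of this patch):

    #include <rte_ethdev.h>

    /* Request VLAN stripping and filtering; the ethdev layer translates
     * the RTE_ETH_VLAN_*_OFFLOAD bits into the RTE_ETH_VLAN_*_MASK bits
     * handed to the driver callback above. */
    static int
    enable_vlan_offloads(uint16_t port_id)
    {
            return rte_eth_dev_set_vlan_offload(port_id,
                            RTE_ETH_VLAN_STRIP_OFFLOAD |
                            RTE_ETH_VLAN_FILTER_OFFLOAD);
    }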
@@ -512,8 +512,8 @@ iavf_dev_configure(struct rte_eth_dev *dev)
ad->rx_vec_allowed = true;
ad->tx_vec_allowed = true;
- if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
- dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+ if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+ dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
/* Large VF setting */
if (num_queue_pairs > IAVF_MAX_NUM_QUEUES_DFLT) {
@@ -611,7 +611,7 @@ iavf_init_rxq(struct rte_eth_dev *dev, struct iavf_rx_queue *rxq)
}
rxq->max_pkt_len = max_pkt_len;
- if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
+ if ((dev_data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ||
rxq->max_pkt_len > buf_size) {
dev_data->scattered_rx = 1;
}
@@ -961,34 +961,34 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->flow_type_rss_offloads = IAVF_RSS_OFFLOAD_ALL;
dev_info->max_mac_addrs = IAVF_NUM_MACADDR_MAX;
dev_info->rx_offload_capa =
- DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_QINQ_STRIP |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_SCATTER |
- DEV_RX_OFFLOAD_VLAN_FILTER |
- DEV_RX_OFFLOAD_RSS_HASH;
+ RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+ RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+ RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_SCATTER |
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+ RTE_ETH_RX_OFFLOAD_RSS_HASH;
dev_info->tx_offload_capa =
- DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_QINQ_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_SCTP_CKSUM |
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO |
- DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
- DEV_TX_OFFLOAD_GRE_TNL_TSO |
- DEV_TX_OFFLOAD_IPIP_TNL_TSO |
- DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
- DEV_TX_OFFLOAD_MULTI_SEGS |
- DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+ RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_TSO |
+ RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+ RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_CRC)
- dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_KEEP_CRC;
+ dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
dev_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_free_thresh = IAVF_DEFAULT_RX_FREE_THRESH,
@@ -1048,42 +1048,42 @@ iavf_dev_link_update(struct rte_eth_dev *dev,
*/
switch (vf->link_speed) {
case 10:
- new_link.link_speed = ETH_SPEED_NUM_10M;
+ new_link.link_speed = RTE_ETH_SPEED_NUM_10M;
break;
case 100:
- new_link.link_speed = ETH_SPEED_NUM_100M;
+ new_link.link_speed = RTE_ETH_SPEED_NUM_100M;
break;
case 1000:
- new_link.link_speed = ETH_SPEED_NUM_1G;
+ new_link.link_speed = RTE_ETH_SPEED_NUM_1G;
break;
case 10000:
- new_link.link_speed = ETH_SPEED_NUM_10G;
+ new_link.link_speed = RTE_ETH_SPEED_NUM_10G;
break;
case 20000:
- new_link.link_speed = ETH_SPEED_NUM_20G;
+ new_link.link_speed = RTE_ETH_SPEED_NUM_20G;
break;
case 25000:
- new_link.link_speed = ETH_SPEED_NUM_25G;
+ new_link.link_speed = RTE_ETH_SPEED_NUM_25G;
break;
case 40000:
- new_link.link_speed = ETH_SPEED_NUM_40G;
+ new_link.link_speed = RTE_ETH_SPEED_NUM_40G;
break;
case 50000:
- new_link.link_speed = ETH_SPEED_NUM_50G;
+ new_link.link_speed = RTE_ETH_SPEED_NUM_50G;
break;
case 100000:
- new_link.link_speed = ETH_SPEED_NUM_100G;
+ new_link.link_speed = RTE_ETH_SPEED_NUM_100G;
break;
default:
- new_link.link_speed = ETH_SPEED_NUM_NONE;
+ new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
break;
}
- new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
- new_link.link_status = vf->link_up ? ETH_LINK_UP :
- ETH_LINK_DOWN;
+ new_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+ new_link.link_status = vf->link_up ? RTE_ETH_LINK_UP :
+ RTE_ETH_LINK_DOWN;
new_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
- ETH_LINK_SPEED_FIXED);
+ RTE_ETH_LINK_SPEED_FIXED);
return rte_eth_linkstatus_set(dev, &new_link);
}
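On the application side, the same renamed link constants appear when reading the status back; a minimal sketch (illustrative helper, assumes a started port):

    #include <stdio.h>
    #include <rte_ethdev.h>

    static void
    print_link(uint16_t port_id)
    {
            struct rte_eth_link link;

            if (rte_eth_link_get_nowait(port_id, &link) != 0)
                    return;
            printf("port %u: %s, %u Mbps, %s duplex\n", port_id,
                   link.link_status == RTE_ETH_LINK_UP ? "up" : "down",
                   link.link_speed,
                   link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
                           "full" : "half");
    }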
@@ -1231,14 +1231,14 @@ iavf_dev_vlan_offload_set_v2(struct rte_eth_dev *dev, int mask)
bool enable;
int err;
- if (mask & ETH_VLAN_FILTER_MASK) {
- enable = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER);
+ if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+ enable = !!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER);
iavf_iterate_vlan_filters_v2(dev, enable);
}
- if (mask & ETH_VLAN_STRIP_MASK) {
- enable = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+ if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+ enable = !!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
err = iavf_config_vlan_strip_v2(adapter, enable);
/* If not support, the stripping is already disabled by PF */
@@ -1267,9 +1267,9 @@ iavf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
return -ENOTSUP;
/* Vlan stripping setting */
- if (mask & ETH_VLAN_STRIP_MASK) {
+ if (mask & RTE_ETH_VLAN_STRIP_MASK) {
/* Enable or disable VLAN stripping */
- if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
err = iavf_enable_vlan_strip(adapter);
else
err = iavf_disable_vlan_strip(adapter);
@@ -1311,8 +1311,8 @@ iavf_dev_rss_reta_update(struct rte_eth_dev *dev,
rte_memcpy(lut, vf->rss_lut, reta_size);
for (i = 0; i < reta_size; i++) {
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
if (reta_conf[idx].mask & (1ULL << shift))
lut[i] = reta_conf[idx].reta[shift];
}
@@ -1348,8 +1348,8 @@ iavf_dev_rss_reta_query(struct rte_eth_dev *dev,
}
for (i = 0; i < reta_size; i++) {
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
if (reta_conf[idx].mask & (1ULL << shift))
reta_conf[idx].reta[shift] = vf->rss_lut[i];
}
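The renamed RTE_ETH_RETA_GROUP_SIZE is what applications use to build the reta_conf array these two callbacks consume; a sketch assuming reta_size <= 512 (the helper name is a placeholder):

    #include <string.h>
    #include <rte_ethdev.h>

    /* Spread RETA entries round-robin across nb_queues Rx queues. */
    static int
    spread_reta(uint16_t port_id, uint16_t reta_size, uint16_t nb_queues)
    {
            struct rte_eth_rss_reta_entry64 reta[RTE_ETH_RSS_RETA_SIZE_512 /
                                                 RTE_ETH_RETA_GROUP_SIZE];
            uint16_t i;

            memset(reta, 0, sizeof(reta));
            for (i = 0; i < reta_size; i++) {
                    uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
                    uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE;

                    reta[idx].mask |= 1ULL << shift;
                    reta[idx].reta[shift] = i % nb_queues;
            }
            return rte_eth_dev_rss_reta_update(port_id, reta, reta_size);
    }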
@@ -1556,7 +1556,7 @@ iavf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
ret = iavf_query_stats(adapter, &pstats);
if (ret == 0) {
uint8_t crc_stats_len = (dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_KEEP_CRC) ? 0 :
+ RTE_ETH_RX_OFFLOAD_KEEP_CRC) ? 0 :
RTE_ETHER_CRC_LEN;
iavf_update_stats(vsi, pstats);
stats->ipackets = pstats->rx_unicast + pstats->rx_multicast +
@@ -395,90 +395,90 @@ struct virtchnl_proto_hdrs udp_l2tpv2_ppp_ipv6_tcp_tmplt = {
/* rss type super set */
/* IPv4 outer */
-#define IAVF_RSS_TYPE_OUTER_IPV4 (ETH_RSS_ETH | ETH_RSS_IPV4 | \
- ETH_RSS_FRAG_IPV4 | \
- ETH_RSS_IPV4_CHKSUM)
+#define IAVF_RSS_TYPE_OUTER_IPV4 (RTE_ETH_RSS_ETH | RTE_ETH_RSS_IPV4 | \
+ RTE_ETH_RSS_FRAG_IPV4 | \
+ RTE_ETH_RSS_IPV4_CHKSUM)
#define IAVF_RSS_TYPE_OUTER_IPV4_UDP (IAVF_RSS_TYPE_OUTER_IPV4 | \
- ETH_RSS_NONFRAG_IPV4_UDP | \
- ETH_RSS_L4_CHKSUM)
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+ RTE_ETH_RSS_L4_CHKSUM)
#define IAVF_RSS_TYPE_OUTER_IPV4_TCP (IAVF_RSS_TYPE_OUTER_IPV4 | \
- ETH_RSS_NONFRAG_IPV4_TCP | \
- ETH_RSS_L4_CHKSUM)
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+ RTE_ETH_RSS_L4_CHKSUM)
#define IAVF_RSS_TYPE_OUTER_IPV4_SCTP (IAVF_RSS_TYPE_OUTER_IPV4 | \
- ETH_RSS_NONFRAG_IPV4_SCTP | \
- ETH_RSS_L4_CHKSUM)
+ RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+ RTE_ETH_RSS_L4_CHKSUM)
/* IPv6 outer */
-#define IAVF_RSS_TYPE_OUTER_IPV6 (ETH_RSS_ETH | ETH_RSS_IPV6)
+#define IAVF_RSS_TYPE_OUTER_IPV6 (RTE_ETH_RSS_ETH | RTE_ETH_RSS_IPV6)
#define IAVF_RSS_TYPE_OUTER_IPV6_FRAG (IAVF_RSS_TYPE_OUTER_IPV6 | \
- ETH_RSS_FRAG_IPV6)
+ RTE_ETH_RSS_FRAG_IPV6)
#define IAVF_RSS_TYPE_OUTER_IPV6_UDP (IAVF_RSS_TYPE_OUTER_IPV6 | \
- ETH_RSS_NONFRAG_IPV6_UDP | \
- ETH_RSS_L4_CHKSUM)
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+ RTE_ETH_RSS_L4_CHKSUM)
#define IAVF_RSS_TYPE_OUTER_IPV6_TCP (IAVF_RSS_TYPE_OUTER_IPV6 | \
- ETH_RSS_NONFRAG_IPV6_TCP | \
- ETH_RSS_L4_CHKSUM)
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+ RTE_ETH_RSS_L4_CHKSUM)
#define IAVF_RSS_TYPE_OUTER_IPV6_SCTP (IAVF_RSS_TYPE_OUTER_IPV6 | \
- ETH_RSS_NONFRAG_IPV6_SCTP | \
- ETH_RSS_L4_CHKSUM)
+ RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
+ RTE_ETH_RSS_L4_CHKSUM)
/* VLAN IPV4 */
#define IAVF_RSS_TYPE_VLAN_IPV4 (IAVF_RSS_TYPE_OUTER_IPV4 | \
- ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+ RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
#define IAVF_RSS_TYPE_VLAN_IPV4_UDP (IAVF_RSS_TYPE_OUTER_IPV4_UDP | \
- ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+ RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
#define IAVF_RSS_TYPE_VLAN_IPV4_TCP (IAVF_RSS_TYPE_OUTER_IPV4_TCP | \
- ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+ RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
#define IAVF_RSS_TYPE_VLAN_IPV4_SCTP (IAVF_RSS_TYPE_OUTER_IPV4_SCTP | \
- ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+ RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
/* VLAN IPv6 */
#define IAVF_RSS_TYPE_VLAN_IPV6 (IAVF_RSS_TYPE_OUTER_IPV6 | \
- ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+ RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
#define IAVF_RSS_TYPE_VLAN_IPV6_FRAG (IAVF_RSS_TYPE_OUTER_IPV6_FRAG | \
- ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+ RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
#define IAVF_RSS_TYPE_VLAN_IPV6_UDP (IAVF_RSS_TYPE_OUTER_IPV6_UDP | \
- ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+ RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
#define IAVF_RSS_TYPE_VLAN_IPV6_TCP (IAVF_RSS_TYPE_OUTER_IPV6_TCP | \
- ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+ RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
#define IAVF_RSS_TYPE_VLAN_IPV6_SCTP (IAVF_RSS_TYPE_OUTER_IPV6_SCTP | \
- ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+ RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
/* IPv4 inner */
-#define IAVF_RSS_TYPE_INNER_IPV4 ETH_RSS_IPV4
-#define IAVF_RSS_TYPE_INNER_IPV4_UDP (ETH_RSS_IPV4 | \
- ETH_RSS_NONFRAG_IPV4_UDP)
-#define IAVF_RSS_TYPE_INNER_IPV4_TCP (ETH_RSS_IPV4 | \
- ETH_RSS_NONFRAG_IPV4_TCP)
-#define IAVF_RSS_TYPE_INNER_IPV4_SCTP (ETH_RSS_IPV4 | \
- ETH_RSS_NONFRAG_IPV4_SCTP)
+#define IAVF_RSS_TYPE_INNER_IPV4 RTE_ETH_RSS_IPV4
+#define IAVF_RSS_TYPE_INNER_IPV4_UDP (RTE_ETH_RSS_IPV4 | \
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP)
+#define IAVF_RSS_TYPE_INNER_IPV4_TCP (RTE_ETH_RSS_IPV4 | \
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP)
+#define IAVF_RSS_TYPE_INNER_IPV4_SCTP (RTE_ETH_RSS_IPV4 | \
+ RTE_ETH_RSS_NONFRAG_IPV4_SCTP)
/* IPv6 inner */
-#define IAVF_RSS_TYPE_INNER_IPV6 ETH_RSS_IPV6
-#define IAVF_RSS_TYPE_INNER_IPV6_UDP (ETH_RSS_IPV6 | \
- ETH_RSS_NONFRAG_IPV6_UDP)
-#define IAVF_RSS_TYPE_INNER_IPV6_TCP (ETH_RSS_IPV6 | \
- ETH_RSS_NONFRAG_IPV6_TCP)
-#define IAVF_RSS_TYPE_INNER_IPV6_SCTP (ETH_RSS_IPV6 | \
- ETH_RSS_NONFRAG_IPV6_SCTP)
+#define IAVF_RSS_TYPE_INNER_IPV6 RTE_ETH_RSS_IPV6
+#define IAVF_RSS_TYPE_INNER_IPV6_UDP (RTE_ETH_RSS_IPV6 | \
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP)
+#define IAVF_RSS_TYPE_INNER_IPV6_TCP (RTE_ETH_RSS_IPV6 | \
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP)
+#define IAVF_RSS_TYPE_INNER_IPV6_SCTP (RTE_ETH_RSS_IPV6 | \
+ RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
/* GTPU IPv4 */
#define IAVF_RSS_TYPE_GTPU_IPV4 (IAVF_RSS_TYPE_INNER_IPV4 | \
- ETH_RSS_GTPU)
+ RTE_ETH_RSS_GTPU)
#define IAVF_RSS_TYPE_GTPU_IPV4_UDP (IAVF_RSS_TYPE_INNER_IPV4_UDP | \
- ETH_RSS_GTPU)
+ RTE_ETH_RSS_GTPU)
#define IAVF_RSS_TYPE_GTPU_IPV4_TCP (IAVF_RSS_TYPE_INNER_IPV4_TCP | \
- ETH_RSS_GTPU)
+ RTE_ETH_RSS_GTPU)
/* GTPU IPv6 */
#define IAVF_RSS_TYPE_GTPU_IPV6 (IAVF_RSS_TYPE_INNER_IPV6 | \
- ETH_RSS_GTPU)
+ RTE_ETH_RSS_GTPU)
#define IAVF_RSS_TYPE_GTPU_IPV6_UDP (IAVF_RSS_TYPE_INNER_IPV6_UDP | \
- ETH_RSS_GTPU)
+ RTE_ETH_RSS_GTPU)
#define IAVF_RSS_TYPE_GTPU_IPV6_TCP (IAVF_RSS_TYPE_INNER_IPV6_TCP | \
- ETH_RSS_GTPU)
+ RTE_ETH_RSS_GTPU)
/* ESP, AH, L2TPV3 and PFCP */
-#define IAVF_RSS_TYPE_IPV4_ESP (ETH_RSS_ESP | ETH_RSS_IPV4)
-#define IAVF_RSS_TYPE_IPV4_AH (ETH_RSS_AH | ETH_RSS_IPV4)
-#define IAVF_RSS_TYPE_IPV6_ESP (ETH_RSS_ESP | ETH_RSS_IPV6)
-#define IAVF_RSS_TYPE_IPV6_AH (ETH_RSS_AH | ETH_RSS_IPV6)
-#define IAVF_RSS_TYPE_IPV4_L2TPV3 (ETH_RSS_L2TPV3 | ETH_RSS_IPV4)
-#define IAVF_RSS_TYPE_IPV6_L2TPV3 (ETH_RSS_L2TPV3 | ETH_RSS_IPV6)
-#define IAVF_RSS_TYPE_IPV4_PFCP (ETH_RSS_PFCP | ETH_RSS_IPV4)
-#define IAVF_RSS_TYPE_IPV6_PFCP (ETH_RSS_PFCP | ETH_RSS_IPV6)
+#define IAVF_RSS_TYPE_IPV4_ESP (RTE_ETH_RSS_ESP | RTE_ETH_RSS_IPV4)
+#define IAVF_RSS_TYPE_IPV4_AH (RTE_ETH_RSS_AH | RTE_ETH_RSS_IPV4)
+#define IAVF_RSS_TYPE_IPV6_ESP (RTE_ETH_RSS_ESP | RTE_ETH_RSS_IPV6)
+#define IAVF_RSS_TYPE_IPV6_AH (RTE_ETH_RSS_AH | RTE_ETH_RSS_IPV6)
+#define IAVF_RSS_TYPE_IPV4_L2TPV3 (RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_IPV4)
+#define IAVF_RSS_TYPE_IPV6_L2TPV3 (RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_IPV6)
+#define IAVF_RSS_TYPE_IPV4_PFCP (RTE_ETH_RSS_PFCP | RTE_ETH_RSS_IPV4)
+#define IAVF_RSS_TYPE_IPV6_PFCP (RTE_ETH_RSS_PFCP | RTE_ETH_RSS_IPV6)
/**
* Supported pattern for hash.
@@ -496,7 +496,7 @@ static struct iavf_pattern_match_item iavf_hash_pattern_list[] = {
{iavf_pattern_eth_vlan_ipv4_udp, IAVF_RSS_TYPE_VLAN_IPV4_UDP, &outer_ipv4_udp_tmplt},
{iavf_pattern_eth_vlan_ipv4_tcp, IAVF_RSS_TYPE_VLAN_IPV4_TCP, &outer_ipv4_tcp_tmplt},
{iavf_pattern_eth_vlan_ipv4_sctp, IAVF_RSS_TYPE_VLAN_IPV4_SCTP, &outer_ipv4_sctp_tmplt},
- {iavf_pattern_eth_ipv4_gtpu, ETH_RSS_IPV4, &outer_ipv4_udp_tmplt},
+ {iavf_pattern_eth_ipv4_gtpu, RTE_ETH_RSS_IPV4, &outer_ipv4_udp_tmplt},
{iavf_pattern_eth_ipv4_gtpu_ipv4, IAVF_RSS_TYPE_GTPU_IPV4, &inner_ipv4_tmplt},
{iavf_pattern_eth_ipv4_gtpu_ipv4_udp, IAVF_RSS_TYPE_GTPU_IPV4_UDP, &inner_ipv4_udp_tmplt},
{iavf_pattern_eth_ipv4_gtpu_ipv4_tcp, IAVF_RSS_TYPE_GTPU_IPV4_TCP, &inner_ipv4_tcp_tmplt},
@@ -538,9 +538,9 @@ static struct iavf_pattern_match_item iavf_hash_pattern_list[] = {
{iavf_pattern_eth_ipv4_ah, IAVF_RSS_TYPE_IPV4_AH, &ipv4_ah_tmplt},
{iavf_pattern_eth_ipv4_l2tpv3, IAVF_RSS_TYPE_IPV4_L2TPV3, &ipv4_l2tpv3_tmplt},
{iavf_pattern_eth_ipv4_pfcp, IAVF_RSS_TYPE_IPV4_PFCP, &ipv4_pfcp_tmplt},
- {iavf_pattern_eth_ipv4_gtpc, ETH_RSS_IPV4, &ipv4_udp_gtpc_tmplt},
- {iavf_pattern_eth_ecpri, ETH_RSS_ECPRI, &eth_ecpri_tmplt},
- {iavf_pattern_eth_ipv4_ecpri, ETH_RSS_ECPRI, &ipv4_ecpri_tmplt},
+ {iavf_pattern_eth_ipv4_gtpc, RTE_ETH_RSS_IPV4, &ipv4_udp_gtpc_tmplt},
+ {iavf_pattern_eth_ecpri, RTE_ETH_RSS_ECPRI, &eth_ecpri_tmplt},
+ {iavf_pattern_eth_ipv4_ecpri, RTE_ETH_RSS_ECPRI, &ipv4_ecpri_tmplt},
{iavf_pattern_eth_ipv4_gre_ipv4, IAVF_RSS_TYPE_INNER_IPV4, &inner_ipv4_tmplt},
{iavf_pattern_eth_ipv6_gre_ipv4, IAVF_RSS_TYPE_INNER_IPV4, &inner_ipv4_tmplt},
{iavf_pattern_eth_ipv4_gre_ipv4_tcp, IAVF_RSS_TYPE_INNER_IPV4_TCP, &inner_ipv4_tcp_tmplt},
@@ -565,7 +565,7 @@ static struct iavf_pattern_match_item iavf_hash_pattern_list[] = {
{iavf_pattern_eth_vlan_ipv6_udp, IAVF_RSS_TYPE_VLAN_IPV6_UDP, &outer_ipv6_udp_tmplt},
{iavf_pattern_eth_vlan_ipv6_tcp, IAVF_RSS_TYPE_VLAN_IPV6_TCP, &outer_ipv6_tcp_tmplt},
{iavf_pattern_eth_vlan_ipv6_sctp, IAVF_RSS_TYPE_VLAN_IPV6_SCTP, &outer_ipv6_sctp_tmplt},
- {iavf_pattern_eth_ipv6_gtpu, ETH_RSS_IPV6, &outer_ipv6_udp_tmplt},
+ {iavf_pattern_eth_ipv6_gtpu, RTE_ETH_RSS_IPV6, &outer_ipv6_udp_tmplt},
{iavf_pattern_eth_ipv4_gtpu_ipv6, IAVF_RSS_TYPE_GTPU_IPV6, &inner_ipv6_tmplt},
{iavf_pattern_eth_ipv4_gtpu_ipv6_udp, IAVF_RSS_TYPE_GTPU_IPV6_UDP, &inner_ipv6_udp_tmplt},
{iavf_pattern_eth_ipv4_gtpu_ipv6_tcp, IAVF_RSS_TYPE_GTPU_IPV6_TCP, &inner_ipv6_tcp_tmplt},
@@ -607,7 +607,7 @@ static struct iavf_pattern_match_item iavf_hash_pattern_list[] = {
{iavf_pattern_eth_ipv6_ah, IAVF_RSS_TYPE_IPV6_AH, &ipv6_ah_tmplt},
{iavf_pattern_eth_ipv6_l2tpv3, IAVF_RSS_TYPE_IPV6_L2TPV3, &ipv6_l2tpv3_tmplt},
{iavf_pattern_eth_ipv6_pfcp, IAVF_RSS_TYPE_IPV6_PFCP, &ipv6_pfcp_tmplt},
- {iavf_pattern_eth_ipv6_gtpc, ETH_RSS_IPV6, &ipv6_udp_gtpc_tmplt},
+ {iavf_pattern_eth_ipv6_gtpc, RTE_ETH_RSS_IPV6, &ipv6_udp_gtpc_tmplt},
{iavf_pattern_eth_ipv4_gre_ipv6, IAVF_RSS_TYPE_INNER_IPV6, &inner_ipv6_tmplt},
{iavf_pattern_eth_ipv6_gre_ipv6, IAVF_RSS_TYPE_INNER_IPV6, &inner_ipv6_tmplt},
{iavf_pattern_eth_ipv4_gre_ipv6_tcp, IAVF_RSS_TYPE_INNER_IPV6_TCP, &inner_ipv6_tcp_tmplt},
@@ -648,52 +648,52 @@ iavf_rss_hash_set(struct iavf_adapter *ad, uint64_t rss_hf, bool add)
struct virtchnl_rss_cfg rss_cfg;
#define IAVF_RSS_HF_ALL ( \
- ETH_RSS_IPV4 | \
- ETH_RSS_IPV6 | \
- ETH_RSS_NONFRAG_IPV4_UDP | \
- ETH_RSS_NONFRAG_IPV6_UDP | \
- ETH_RSS_NONFRAG_IPV4_TCP | \
- ETH_RSS_NONFRAG_IPV6_TCP | \
- ETH_RSS_NONFRAG_IPV4_SCTP | \
- ETH_RSS_NONFRAG_IPV6_SCTP)
+ RTE_ETH_RSS_IPV4 | \
+ RTE_ETH_RSS_IPV6 | \
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
rss_cfg.rss_algorithm = VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC;
- if (rss_hf & ETH_RSS_IPV4) {
+ if (rss_hf & RTE_ETH_RSS_IPV4) {
rss_cfg.proto_hdrs = inner_ipv4_tmplt;
iavf_add_del_rss_cfg(ad, &rss_cfg, add);
}
- if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) {
rss_cfg.proto_hdrs = inner_ipv4_udp_tmplt;
iavf_add_del_rss_cfg(ad, &rss_cfg, add);
}
- if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) {
rss_cfg.proto_hdrs = inner_ipv4_tcp_tmplt;
iavf_add_del_rss_cfg(ad, &rss_cfg, add);
}
- if (rss_hf & ETH_RSS_NONFRAG_IPV4_SCTP) {
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_SCTP) {
rss_cfg.proto_hdrs = inner_ipv4_sctp_tmplt;
iavf_add_del_rss_cfg(ad, &rss_cfg, add);
}
- if (rss_hf & ETH_RSS_IPV6) {
+ if (rss_hf & RTE_ETH_RSS_IPV6) {
rss_cfg.proto_hdrs = inner_ipv6_tmplt;
iavf_add_del_rss_cfg(ad, &rss_cfg, add);
}
- if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) {
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) {
rss_cfg.proto_hdrs = inner_ipv6_udp_tmplt;
iavf_add_del_rss_cfg(ad, &rss_cfg, add);
}
- if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) {
rss_cfg.proto_hdrs = inner_ipv6_tcp_tmplt;
iavf_add_del_rss_cfg(ad, &rss_cfg, add);
}
- if (rss_hf & ETH_RSS_NONFRAG_IPV6_SCTP) {
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_SCTP) {
rss_cfg.proto_hdrs = inner_ipv6_sctp_tmplt;
iavf_add_del_rss_cfg(ad, &rss_cfg, add);
}
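The rss_hf bits tested above come from the application's port configuration; a hedged example requesting a subset of IAVF_RSS_HF_ALL under the new names (the helper name and queue counts are placeholders):

    #include <rte_ethdev.h>

    static int
    configure_rss(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
    {
            struct rte_eth_conf conf = {
                    .rxmode = { .mq_mode = RTE_ETH_MQ_RX_RSS },
                    .rx_adv_conf = {
                            .rss_conf = {
                                    .rss_key = NULL, /* default key */
                                    .rss_hf = RTE_ETH_RSS_NONFRAG_IPV4_TCP |
                                              RTE_ETH_RSS_NONFRAG_IPV4_UDP |
                                              RTE_ETH_RSS_NONFRAG_IPV6_TCP |
                                              RTE_ETH_RSS_NONFRAG_IPV6_UDP,
                            },
                    },
            };

            return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
    }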
@@ -855,28 +855,28 @@ iavf_refine_proto_hdrs_l234(struct virtchnl_proto_hdrs *proto_hdrs,
hdr = &proto_hdrs->proto_hdr[i];
switch (hdr->type) {
case VIRTCHNL_PROTO_HDR_ETH:
- if (!(rss_type & ETH_RSS_ETH))
+ if (!(rss_type & RTE_ETH_RSS_ETH))
hdr->field_selector = 0;
- else if (rss_type & ETH_RSS_L2_SRC_ONLY)
+ else if (rss_type & RTE_ETH_RSS_L2_SRC_ONLY)
REFINE_PROTO_FLD(DEL, ETH_DST);
- else if (rss_type & ETH_RSS_L2_DST_ONLY)
+ else if (rss_type & RTE_ETH_RSS_L2_DST_ONLY)
REFINE_PROTO_FLD(DEL, ETH_SRC);
break;
case VIRTCHNL_PROTO_HDR_IPV4:
if (rss_type &
- (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
- ETH_RSS_NONFRAG_IPV4_UDP |
- ETH_RSS_NONFRAG_IPV4_TCP |
- ETH_RSS_NONFRAG_IPV4_SCTP)) {
- if (rss_type & ETH_RSS_FRAG_IPV4) {
+ (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+ RTE_ETH_RSS_NONFRAG_IPV4_SCTP)) {
+ if (rss_type & RTE_ETH_RSS_FRAG_IPV4) {
iavf_hash_add_fragment_hdr(proto_hdrs, i + 1);
- } else if (rss_type & ETH_RSS_L3_SRC_ONLY) {
+ } else if (rss_type & RTE_ETH_RSS_L3_SRC_ONLY) {
REFINE_PROTO_FLD(DEL, IPV4_DST);
- } else if (rss_type & ETH_RSS_L3_DST_ONLY) {
+ } else if (rss_type & RTE_ETH_RSS_L3_DST_ONLY) {
REFINE_PROTO_FLD(DEL, IPV4_SRC);
} else if (rss_type &
- (ETH_RSS_L4_SRC_ONLY |
- ETH_RSS_L4_DST_ONLY)) {
+ (RTE_ETH_RSS_L4_SRC_ONLY |
+ RTE_ETH_RSS_L4_DST_ONLY)) {
REFINE_PROTO_FLD(DEL, IPV4_DST);
REFINE_PROTO_FLD(DEL, IPV4_SRC);
}
@@ -884,39 +884,39 @@ iavf_refine_proto_hdrs_l234(struct virtchnl_proto_hdrs *proto_hdrs,
hdr->field_selector = 0;
}
- if (rss_type & ETH_RSS_IPV4_CHKSUM)
+ if (rss_type & RTE_ETH_RSS_IPV4_CHKSUM)
REFINE_PROTO_FLD(ADD, IPV4_CHKSUM);
break;
case VIRTCHNL_PROTO_HDR_IPV4_FRAG:
if (rss_type &
- (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
- ETH_RSS_NONFRAG_IPV4_UDP |
- ETH_RSS_NONFRAG_IPV4_TCP |
- ETH_RSS_NONFRAG_IPV4_SCTP)) {
- if (rss_type & ETH_RSS_FRAG_IPV4)
+ (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+ RTE_ETH_RSS_NONFRAG_IPV4_SCTP)) {
+ if (rss_type & RTE_ETH_RSS_FRAG_IPV4)
REFINE_PROTO_FLD(ADD, IPV4_FRAG_PKID);
} else {
hdr->field_selector = 0;
}
- if (rss_type & ETH_RSS_IPV4_CHKSUM)
+ if (rss_type & RTE_ETH_RSS_IPV4_CHKSUM)
REFINE_PROTO_FLD(ADD, IPV4_CHKSUM);
break;
case VIRTCHNL_PROTO_HDR_IPV6:
if (rss_type &
- (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
- ETH_RSS_NONFRAG_IPV6_UDP |
- ETH_RSS_NONFRAG_IPV6_TCP |
- ETH_RSS_NONFRAG_IPV6_SCTP)) {
- if (rss_type & ETH_RSS_L3_SRC_ONLY) {
+ (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+ RTE_ETH_RSS_NONFRAG_IPV6_SCTP)) {
+ if (rss_type & RTE_ETH_RSS_L3_SRC_ONLY) {
REFINE_PROTO_FLD(DEL, IPV6_DST);
- } else if (rss_type & ETH_RSS_L3_DST_ONLY) {
+ } else if (rss_type & RTE_ETH_RSS_L3_DST_ONLY) {
REFINE_PROTO_FLD(DEL, IPV6_SRC);
} else if (rss_type &
- (ETH_RSS_L4_SRC_ONLY |
- ETH_RSS_L4_DST_ONLY)) {
+ (RTE_ETH_RSS_L4_SRC_ONLY |
+ RTE_ETH_RSS_L4_DST_ONLY)) {
REFINE_PROTO_FLD(DEL, IPV6_DST);
REFINE_PROTO_FLD(DEL, IPV6_SRC);
}
@@ -933,7 +933,7 @@ iavf_refine_proto_hdrs_l234(struct virtchnl_proto_hdrs *proto_hdrs,
}
break;
case VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG:
- if (rss_type & ETH_RSS_FRAG_IPV6)
+ if (rss_type & RTE_ETH_RSS_FRAG_IPV6)
REFINE_PROTO_FLD(ADD, IPV6_EH_FRAG_PKID);
else
hdr->field_selector = 0;
@@ -941,87 +941,87 @@ iavf_refine_proto_hdrs_l234(struct virtchnl_proto_hdrs *proto_hdrs,
break;
case VIRTCHNL_PROTO_HDR_UDP:
if (rss_type &
- (ETH_RSS_NONFRAG_IPV4_UDP |
- ETH_RSS_NONFRAG_IPV6_UDP)) {
- if (rss_type & ETH_RSS_L4_SRC_ONLY)
+ (RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP)) {
+ if (rss_type & RTE_ETH_RSS_L4_SRC_ONLY)
REFINE_PROTO_FLD(DEL, UDP_DST_PORT);
- else if (rss_type & ETH_RSS_L4_DST_ONLY)
+ else if (rss_type & RTE_ETH_RSS_L4_DST_ONLY)
REFINE_PROTO_FLD(DEL, UDP_SRC_PORT);
else if (rss_type &
- (ETH_RSS_L3_SRC_ONLY |
- ETH_RSS_L3_DST_ONLY))
+ (RTE_ETH_RSS_L3_SRC_ONLY |
+ RTE_ETH_RSS_L3_DST_ONLY))
hdr->field_selector = 0;
} else {
hdr->field_selector = 0;
}
- if (rss_type & ETH_RSS_L4_CHKSUM)
+ if (rss_type & RTE_ETH_RSS_L4_CHKSUM)
REFINE_PROTO_FLD(ADD, UDP_CHKSUM);
break;
case VIRTCHNL_PROTO_HDR_TCP:
if (rss_type &
- (ETH_RSS_NONFRAG_IPV4_TCP |
- ETH_RSS_NONFRAG_IPV6_TCP)) {
- if (rss_type & ETH_RSS_L4_SRC_ONLY)
+ (RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP)) {
+ if (rss_type & RTE_ETH_RSS_L4_SRC_ONLY)
REFINE_PROTO_FLD(DEL, TCP_DST_PORT);
- else if (rss_type & ETH_RSS_L4_DST_ONLY)
+ else if (rss_type & RTE_ETH_RSS_L4_DST_ONLY)
REFINE_PROTO_FLD(DEL, TCP_SRC_PORT);
else if (rss_type &
- (ETH_RSS_L3_SRC_ONLY |
- ETH_RSS_L3_DST_ONLY))
+ (RTE_ETH_RSS_L3_SRC_ONLY |
+ RTE_ETH_RSS_L3_DST_ONLY))
hdr->field_selector = 0;
} else {
hdr->field_selector = 0;
}
- if (rss_type & ETH_RSS_L4_CHKSUM)
+ if (rss_type & RTE_ETH_RSS_L4_CHKSUM)
REFINE_PROTO_FLD(ADD, TCP_CHKSUM);
break;
case VIRTCHNL_PROTO_HDR_SCTP:
if (rss_type &
- (ETH_RSS_NONFRAG_IPV4_SCTP |
- ETH_RSS_NONFRAG_IPV6_SCTP)) {
- if (rss_type & ETH_RSS_L4_SRC_ONLY)
+ (RTE_ETH_RSS_NONFRAG_IPV4_SCTP |
+ RTE_ETH_RSS_NONFRAG_IPV6_SCTP)) {
+ if (rss_type & RTE_ETH_RSS_L4_SRC_ONLY)
REFINE_PROTO_FLD(DEL, SCTP_DST_PORT);
- else if (rss_type & ETH_RSS_L4_DST_ONLY)
+ else if (rss_type & RTE_ETH_RSS_L4_DST_ONLY)
REFINE_PROTO_FLD(DEL, SCTP_SRC_PORT);
else if (rss_type &
- (ETH_RSS_L3_SRC_ONLY |
- ETH_RSS_L3_DST_ONLY))
+ (RTE_ETH_RSS_L3_SRC_ONLY |
+ RTE_ETH_RSS_L3_DST_ONLY))
hdr->field_selector = 0;
} else {
hdr->field_selector = 0;
}
- if (rss_type & ETH_RSS_L4_CHKSUM)
+ if (rss_type & RTE_ETH_RSS_L4_CHKSUM)
REFINE_PROTO_FLD(ADD, SCTP_CHKSUM);
break;
case VIRTCHNL_PROTO_HDR_S_VLAN:
- if (!(rss_type & ETH_RSS_S_VLAN))
+ if (!(rss_type & RTE_ETH_RSS_S_VLAN))
hdr->field_selector = 0;
break;
case VIRTCHNL_PROTO_HDR_C_VLAN:
- if (!(rss_type & ETH_RSS_C_VLAN))
+ if (!(rss_type & RTE_ETH_RSS_C_VLAN))
hdr->field_selector = 0;
break;
case VIRTCHNL_PROTO_HDR_L2TPV3:
- if (!(rss_type & ETH_RSS_L2TPV3))
+ if (!(rss_type & RTE_ETH_RSS_L2TPV3))
hdr->field_selector = 0;
break;
case VIRTCHNL_PROTO_HDR_ESP:
- if (!(rss_type & ETH_RSS_ESP))
+ if (!(rss_type & RTE_ETH_RSS_ESP))
hdr->field_selector = 0;
break;
case VIRTCHNL_PROTO_HDR_AH:
- if (!(rss_type & ETH_RSS_AH))
+ if (!(rss_type & RTE_ETH_RSS_AH))
hdr->field_selector = 0;
break;
case VIRTCHNL_PROTO_HDR_PFCP:
- if (!(rss_type & ETH_RSS_PFCP))
+ if (!(rss_type & RTE_ETH_RSS_PFCP))
hdr->field_selector = 0;
break;
case VIRTCHNL_PROTO_HDR_ECPRI:
- if (!(rss_type & ETH_RSS_ECPRI))
+ if (!(rss_type & RTE_ETH_RSS_ECPRI))
hdr->field_selector = 0;
break;
default:
@@ -1038,7 +1038,7 @@ iavf_refine_proto_hdrs_gtpu(struct virtchnl_proto_hdrs *proto_hdrs,
struct virtchnl_proto_hdr *hdr;
int i;
- if (!(rss_type & ETH_RSS_GTPU))
+ if (!(rss_type & RTE_ETH_RSS_GTPU))
return;
for (i = 0; i < proto_hdrs->count; i++) {
@@ -1163,10 +1163,10 @@ static void iavf_refine_proto_hdrs(struct virtchnl_proto_hdrs *proto_hdrs,
}
static uint64_t invalid_rss_comb[] = {
- ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_UDP,
- ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP,
- ETH_RSS_IPV6 | ETH_RSS_NONFRAG_IPV6_UDP,
- ETH_RSS_IPV6 | ETH_RSS_NONFRAG_IPV6_TCP,
+ RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_UDP,
+ RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_TCP,
+ RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_UDP,
+ RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_TCP,
RTE_ETH_RSS_L3_PRE32 | RTE_ETH_RSS_L3_PRE40 |
RTE_ETH_RSS_L3_PRE48 | RTE_ETH_RSS_L3_PRE56 |
RTE_ETH_RSS_L3_PRE96
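A compact sketch of how invalid_rss_comb is applied: combining more than one bit from any listed set is rejected. The helper below is illustrative only and covers just the four L3/L4 pairs; the driver's real test may differ in detail:

    #include <stdbool.h>
    #include <stdint.h>
    #include <rte_common.h>
    #include <rte_ethdev.h>

    static bool
    rss_comb_is_invalid(uint64_t rss_hf)
    {
            static const uint64_t invalid[] = {
                    RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_UDP,
                    RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_TCP,
                    RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_UDP,
                    RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_TCP,
            };
            unsigned int i;

            for (i = 0; i < RTE_DIM(invalid); i++)
                    if (__builtin_popcountll(rss_hf & invalid[i]) > 1)
                            return true;
            return false;
    }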
@@ -1177,27 +1177,27 @@ struct rss_attr_type {
uint64_t type;
};
-#define VALID_RSS_IPV4_L4 (ETH_RSS_NONFRAG_IPV4_UDP | \
- ETH_RSS_NONFRAG_IPV4_TCP | \
- ETH_RSS_NONFRAG_IPV4_SCTP)
+#define VALID_RSS_IPV4_L4 (RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_SCTP)
-#define VALID_RSS_IPV6_L4 (ETH_RSS_NONFRAG_IPV6_UDP | \
- ETH_RSS_NONFRAG_IPV6_TCP | \
- ETH_RSS_NONFRAG_IPV6_SCTP)
+#define VALID_RSS_IPV6_L4 (RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
-#define VALID_RSS_IPV4 (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | \
+#define VALID_RSS_IPV4 (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | \
VALID_RSS_IPV4_L4)
-#define VALID_RSS_IPV6 (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | \
+#define VALID_RSS_IPV6 (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | \
VALID_RSS_IPV6_L4)
#define VALID_RSS_L3 (VALID_RSS_IPV4 | VALID_RSS_IPV6)
#define VALID_RSS_L4 (VALID_RSS_IPV4_L4 | VALID_RSS_IPV6_L4)
-#define VALID_RSS_ATTR (ETH_RSS_L3_SRC_ONLY | \
- ETH_RSS_L3_DST_ONLY | \
- ETH_RSS_L4_SRC_ONLY | \
- ETH_RSS_L4_DST_ONLY | \
- ETH_RSS_L2_SRC_ONLY | \
- ETH_RSS_L2_DST_ONLY | \
+#define VALID_RSS_ATTR (RTE_ETH_RSS_L3_SRC_ONLY | \
+ RTE_ETH_RSS_L3_DST_ONLY | \
+ RTE_ETH_RSS_L4_SRC_ONLY | \
+ RTE_ETH_RSS_L4_DST_ONLY | \
+ RTE_ETH_RSS_L2_SRC_ONLY | \
+ RTE_ETH_RSS_L2_DST_ONLY | \
RTE_ETH_RSS_L3_PRE64)
#define INVALID_RSS_ATTR (RTE_ETH_RSS_L3_PRE32 | \
@@ -1207,9 +1207,9 @@ struct rss_attr_type {
RTE_ETH_RSS_L3_PRE96)
static struct rss_attr_type rss_attr_to_valid_type[] = {
- {ETH_RSS_L2_SRC_ONLY | ETH_RSS_L2_DST_ONLY, ETH_RSS_ETH},
- {ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY, VALID_RSS_L3},
- {ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY, VALID_RSS_L4},
+ {RTE_ETH_RSS_L2_SRC_ONLY | RTE_ETH_RSS_L2_DST_ONLY, RTE_ETH_RSS_ETH},
+ {RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY, VALID_RSS_L3},
+ {RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY, VALID_RSS_L4},
/* current ipv6 prefix only supports prefix 64 bits*/
{RTE_ETH_RSS_L3_PRE64, VALID_RSS_IPV6},
{INVALID_RSS_ATTR, 0}
@@ -1226,15 +1226,15 @@ iavf_any_invalid_rss_type(enum rte_eth_hash_function rss_func,
* hash function.
*/
if (rss_func == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ) {
- if (rss_type & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY |
- ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY))
+ if (rss_type & (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY |
+ RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY))
return true;
if (!(rss_type &
- (ETH_RSS_IPV4 | ETH_RSS_IPV6 |
- ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP |
- ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV6_TCP |
- ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_NONFRAG_IPV6_SCTP)))
+ (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6 |
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+ RTE_ETH_RSS_NONFRAG_IPV4_SCTP | RTE_ETH_RSS_NONFRAG_IPV6_SCTP)))
return true;
}
@@ -617,7 +617,7 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
rxq->vsi = vsi;
rxq->offloads = offloads;
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+ if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
rxq->crc_len = RTE_ETHER_CRC_LEN;
else
rxq->crc_len = 0;
@@ -24,22 +24,22 @@
#define IAVF_VPMD_TX_MAX_FREE_BUF 64
#define IAVF_TX_NO_VECTOR_FLAGS ( \
- DEV_TX_OFFLOAD_MULTI_SEGS | \
- DEV_TX_OFFLOAD_TCP_TSO)
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS | \
+ RTE_ETH_TX_OFFLOAD_TCP_TSO)
#define IAVF_TX_VECTOR_OFFLOAD ( \
- DEV_TX_OFFLOAD_VLAN_INSERT | \
- DEV_TX_OFFLOAD_QINQ_INSERT | \
- DEV_TX_OFFLOAD_IPV4_CKSUM | \
- DEV_TX_OFFLOAD_SCTP_CKSUM | \
- DEV_TX_OFFLOAD_UDP_CKSUM | \
- DEV_TX_OFFLOAD_TCP_CKSUM)
+ RTE_ETH_TX_OFFLOAD_VLAN_INSERT | \
+ RTE_ETH_TX_OFFLOAD_QINQ_INSERT | \
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
#define IAVF_RX_VECTOR_OFFLOAD ( \
- DEV_RX_OFFLOAD_CHECKSUM | \
- DEV_RX_OFFLOAD_SCTP_CKSUM | \
- DEV_RX_OFFLOAD_VLAN | \
- DEV_RX_OFFLOAD_RSS_HASH)
+ RTE_ETH_RX_OFFLOAD_CHECKSUM | \
+ RTE_ETH_RX_OFFLOAD_SCTP_CKSUM | \
+ RTE_ETH_RX_OFFLOAD_VLAN | \
+ RTE_ETH_RX_OFFLOAD_RSS_HASH)
#define IAVF_VECTOR_PATH 0
#define IAVF_VECTOR_OFFLOAD_PATH 1
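Read together, these macros describe a three-way Tx path choice. The sketch below is one plausible reading of the selection rule under the renamed offload flags, not the driver's actual code:

    #include <stdint.h>
    #include <rte_ethdev.h>

    /* -1: no vector path; 0: default vector path; 1: offload vector path. */
    static int
    select_tx_path(uint64_t tx_offloads)
    {
            if (tx_offloads & (RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
                               RTE_ETH_TX_OFFLOAD_TCP_TSO))
                    return -1;
            if (tx_offloads & (RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
                               RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
                               RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
                               RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
                               RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
                               RTE_ETH_TX_OFFLOAD_TCP_CKSUM))
                    return 1;
            return 0;
    }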
@@ -906,7 +906,7 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq,
* needs to load 2nd 16B of each desc for RSS hash parsing,
* will cause performance drop to get into this context.
*/
- if (offloads & DEV_RX_OFFLOAD_RSS_HASH ||
+ if (offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH ||
rxq->rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2) {
/* load bottom half of every 32B desc */
const __m128i raw_desc_bh7 =
@@ -958,7 +958,7 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq,
(_mm256_castsi128_si256(raw_desc_bh0),
raw_desc_bh1, 1);
- if (offloads & DEV_RX_OFFLOAD_RSS_HASH) {
+ if (offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH) {
/**
* to shift the 32b RSS hash value to the
* highest 32b of each 128b before mask
@@ -1141,7 +1141,7 @@ _iavf_recv_raw_pkts_vec_avx512_flex_rxd(struct iavf_rx_queue *rxq,
* needs to load 2nd 16B of each desc for RSS hash parsing,
* will cause performance drop to get into this context.
*/
- if (offloads & DEV_RX_OFFLOAD_RSS_HASH ||
+ if (offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH ||
rxq->rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2) {
/* load bottom half of every 32B desc */
const __m128i raw_desc_bh7 =
@@ -1193,7 +1193,7 @@ _iavf_recv_raw_pkts_vec_avx512_flex_rxd(struct iavf_rx_queue *rxq,
(_mm256_castsi128_si256(raw_desc_bh0),
raw_desc_bh1, 1);
- if (offloads & DEV_RX_OFFLOAD_RSS_HASH) {
+ if (offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH) {
/**
* to shift the 32b RSS hash value to the
* highest 32b of each 128b before mask
@@ -1721,7 +1721,7 @@ iavf_tx_free_bufs_avx512(struct iavf_tx_queue *txq)
txep = (void *)txq->sw_ring;
txep += txq->next_dd - (n - 1);
- if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
+ if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
struct rte_mempool *mp = txep[0].mbuf->pool;
struct rte_mempool_cache *cache = rte_mempool_default_cache(mp,
rte_lcore_id());
@@ -819,7 +819,7 @@ _recv_raw_pkts_vec_flex_rxd(struct iavf_rx_queue *rxq,
* needs to load 2nd 16B of each desc for RSS hash parsing,
* will cause performance drop to get into this context.
*/
- if (offloads & DEV_RX_OFFLOAD_RSS_HASH) {
+ if (offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH) {
/* load bottom half of every 32B desc */
const __m128i raw_desc_bh3 =
_mm_load_si128
@@ -835,7 +835,7 @@ ice_dcf_init_rss(struct ice_dcf_hw *hw)
PMD_DRV_LOG(DEBUG, "RSS is not supported");
return -ENOTSUP;
}
- if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
+ if (dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_RSS) {
PMD_DRV_LOG(WARNING, "RSS is enabled by PF by default");
/* set all lut items to default queue */
memset(hw->rss_lut, 0, hw->vf_res->rss_lut_size);
@@ -95,7 +95,7 @@ ice_dcf_init_rxq(struct rte_eth_dev *dev, struct ice_rx_queue *rxq)
}
rxq->max_pkt_len = max_pkt_len;
- if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
+ if ((dev_data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ||
(rxq->max_pkt_len + 2 * ICE_VLAN_TAG_SIZE) > buf_size) {
dev_data->scattered_rx = 1;
}
@@ -582,7 +582,7 @@ ice_dcf_dev_start(struct rte_eth_dev *dev)
return ret;
}
- dev->data->dev_link.link_status = ETH_LINK_UP;
+ dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
return 0;
}
@@ -644,7 +644,7 @@ ice_dcf_dev_stop(struct rte_eth_dev *dev)
}
ice_dcf_add_del_all_mac_addr(&dcf_ad->real_hw, false);
- dev->data->dev_link.link_status = ETH_LINK_DOWN;
+ dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
ad->pf.adapter_stopped = 1;
hw->tm_conf.committed = false;
@@ -660,8 +660,8 @@ ice_dcf_dev_configure(struct rte_eth_dev *dev)
ad->rx_bulk_alloc_allowed = true;
ad->tx_simple_allowed = true;
- if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
- dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+ if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+ dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
return 0;
}
@@ -683,27 +683,27 @@ ice_dcf_dev_info_get(struct rte_eth_dev *dev,
dev_info->flow_type_rss_offloads = ICE_RSS_OFFLOAD_ALL;
dev_info->rx_offload_capa =
- DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_SCATTER |
- DEV_RX_OFFLOAD_VLAN_FILTER |
- DEV_RX_OFFLOAD_RSS_HASH;
+ RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+ RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_SCATTER |
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+ RTE_ETH_RX_OFFLOAD_RSS_HASH;
dev_info->tx_offload_capa =
- DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_SCTP_CKSUM |
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO |
- DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
- DEV_TX_OFFLOAD_GRE_TNL_TSO |
- DEV_TX_OFFLOAD_IPIP_TNL_TSO |
- DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
- DEV_TX_OFFLOAD_MULTI_SEGS;
+ RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_TSO |
+ RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
dev_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_thresh = {
@@ -933,42 +933,42 @@ ice_dcf_link_update(struct rte_eth_dev *dev,
*/
switch (hw->link_speed) {
case 10:
- new_link.link_speed = ETH_SPEED_NUM_10M;
+ new_link.link_speed = RTE_ETH_SPEED_NUM_10M;
break;
case 100:
- new_link.link_speed = ETH_SPEED_NUM_100M;
+ new_link.link_speed = RTE_ETH_SPEED_NUM_100M;
break;
case 1000:
- new_link.link_speed = ETH_SPEED_NUM_1G;
+ new_link.link_speed = RTE_ETH_SPEED_NUM_1G;
break;
case 10000:
- new_link.link_speed = ETH_SPEED_NUM_10G;
+ new_link.link_speed = RTE_ETH_SPEED_NUM_10G;
break;
case 20000:
- new_link.link_speed = ETH_SPEED_NUM_20G;
+ new_link.link_speed = RTE_ETH_SPEED_NUM_20G;
break;
case 25000:
- new_link.link_speed = ETH_SPEED_NUM_25G;
+ new_link.link_speed = RTE_ETH_SPEED_NUM_25G;
break;
case 40000:
- new_link.link_speed = ETH_SPEED_NUM_40G;
+ new_link.link_speed = RTE_ETH_SPEED_NUM_40G;
break;
case 50000:
- new_link.link_speed = ETH_SPEED_NUM_50G;
+ new_link.link_speed = RTE_ETH_SPEED_NUM_50G;
break;
case 100000:
- new_link.link_speed = ETH_SPEED_NUM_100G;
+ new_link.link_speed = RTE_ETH_SPEED_NUM_100G;
break;
default:
- new_link.link_speed = ETH_SPEED_NUM_NONE;
+ new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
break;
}
- new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
- new_link.link_status = hw->link_up ? ETH_LINK_UP :
- ETH_LINK_DOWN;
+ new_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+ new_link.link_status = hw->link_up ? RTE_ETH_LINK_UP :
+ RTE_ETH_LINK_DOWN;
new_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
- ETH_LINK_SPEED_FIXED);
+ RTE_ETH_LINK_SPEED_FIXED);
return rte_eth_linkstatus_set(dev, &new_link);
}
@@ -987,11 +987,11 @@ ice_dcf_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
return -EINVAL;
switch (udp_tunnel->prot_type) {
- case RTE_TUNNEL_TYPE_VXLAN:
+ case RTE_ETH_TUNNEL_TYPE_VXLAN:
ret = ice_create_tunnel(parent_hw, TNL_VXLAN,
udp_tunnel->udp_port);
break;
- case RTE_TUNNEL_TYPE_ECPRI:
+ case RTE_ETH_TUNNEL_TYPE_ECPRI:
ret = ice_create_tunnel(parent_hw, TNL_ECPRI,
udp_tunnel->udp_port);
break;
@@ -1018,8 +1018,8 @@ ice_dcf_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
return -EINVAL;
switch (udp_tunnel->prot_type) {
- case RTE_TUNNEL_TYPE_VXLAN:
- case RTE_TUNNEL_TYPE_ECPRI:
+ case RTE_ETH_TUNNEL_TYPE_VXLAN:
+ case RTE_ETH_TUNNEL_TYPE_ECPRI:
ret = ice_destroy_tunnel(parent_hw, udp_tunnel->udp_port, 0);
break;
default:
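On the application side, the renamed tunnel-type enum is passed through struct rte_eth_udp_tunnel; a short sketch (4789 is the IANA-assigned VXLAN port, used here purely as an example):

    #include <rte_ethdev.h>

    static int
    add_vxlan_port(uint16_t port_id)
    {
            struct rte_eth_udp_tunnel tunnel = {
                    .udp_port = 4789,
                    .prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN,
            };

            return rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
    }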
@@ -37,7 +37,7 @@ ice_dcf_vf_repr_dev_configure(struct rte_eth_dev *dev)
static int
ice_dcf_vf_repr_dev_start(struct rte_eth_dev *dev)
{
- dev->data->dev_link.link_status = ETH_LINK_UP;
+ dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
return 0;
}
@@ -45,7 +45,7 @@ ice_dcf_vf_repr_dev_start(struct rte_eth_dev *dev)
static int
ice_dcf_vf_repr_dev_stop(struct rte_eth_dev *dev)
{
- dev->data->dev_link.link_status = ETH_LINK_DOWN;
+ dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
return 0;
}
@@ -143,28 +143,28 @@ ice_dcf_vf_repr_dev_info_get(struct rte_eth_dev *dev,
dev_info->flow_type_rss_offloads = ICE_RSS_OFFLOAD_ALL;
dev_info->rx_offload_capa =
- DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_SCATTER |
- DEV_RX_OFFLOAD_VLAN_FILTER |
- DEV_RX_OFFLOAD_VLAN_EXTEND |
- DEV_RX_OFFLOAD_RSS_HASH;
+ RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+ RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_SCATTER |
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+ RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
+ RTE_ETH_RX_OFFLOAD_RSS_HASH;
dev_info->tx_offload_capa =
- DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_SCTP_CKSUM |
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO |
- DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
- DEV_TX_OFFLOAD_GRE_TNL_TSO |
- DEV_TX_OFFLOAD_IPIP_TNL_TSO |
- DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
- DEV_TX_OFFLOAD_MULTI_SEGS;
+ RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_TSO |
+ RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
dev_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_thresh = {
@@ -246,9 +246,9 @@ ice_dcf_vf_repr_vlan_offload_set(struct rte_eth_dev *dev, int mask)
return -ENOTSUP;
/* Vlan stripping setting */
- if (mask & ETH_VLAN_STRIP_MASK) {
+ if (mask & RTE_ETH_VLAN_STRIP_MASK) {
bool enable = !!(dev_conf->rxmode.offloads &
- DEV_RX_OFFLOAD_VLAN_STRIP);
+ RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
if (enable && repr->outer_vlan_info.port_vlan_ena) {
PMD_DRV_LOG(ERR,
@@ -345,7 +345,7 @@ ice_dcf_vf_repr_vlan_tpid_set(struct rte_eth_dev *dev,
if (!ice_dcf_vlan_offload_ena(repr))
return -ENOTSUP;
- if (vlan_type != ETH_VLAN_TYPE_OUTER) {
+ if (vlan_type != RTE_ETH_VLAN_TYPE_OUTER) {
PMD_DRV_LOG(ERR,
"Can accelerate only outer VLAN in QinQ\n");
return -EINVAL;
@@ -375,7 +375,7 @@ ice_dcf_vf_repr_vlan_tpid_set(struct rte_eth_dev *dev,
if (repr->outer_vlan_info.stripping_ena) {
err = ice_dcf_vf_repr_vlan_offload_set(dev,
- ETH_VLAN_STRIP_MASK);
+ RTE_ETH_VLAN_STRIP_MASK);
if (err) {
PMD_DRV_LOG(ERR,
"Failed to reset VLAN stripping : %d\n",
@@ -449,7 +449,7 @@ ice_dcf_vf_repr_init_vlan(struct rte_eth_dev *vf_rep_eth_dev)
int err;
err = ice_dcf_vf_repr_vlan_offload_set(vf_rep_eth_dev,
- ETH_VLAN_STRIP_MASK);
+ RTE_ETH_VLAN_STRIP_MASK);
if (err) {
PMD_DRV_LOG(ERR, "Failed to set VLAN offload");
return err;
@@ -1487,9 +1487,9 @@ ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
TAILQ_INIT(&vsi->mac_list);
TAILQ_INIT(&vsi->vlan_list);
- /* Be sync with ETH_RSS_RETA_SIZE_x maximum value definition */
+ /* Be sync with RTE_ETH_RSS_RETA_SIZE_x maximum value definition */
pf->hash_lut_size = hw->func_caps.common_cap.rss_table_size >
- ETH_RSS_RETA_SIZE_512 ? ETH_RSS_RETA_SIZE_512 :
+ RTE_ETH_RSS_RETA_SIZE_512 ? RTE_ETH_RSS_RETA_SIZE_512 :
hw->func_caps.common_cap.rss_table_size;
pf->flags |= ICE_FLAG_RSS_AQ_CAPABLE;
@@ -2993,14 +2993,14 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
int ret;
#define ICE_RSS_HF_ALL ( \
- ETH_RSS_IPV4 | \
- ETH_RSS_IPV6 | \
- ETH_RSS_NONFRAG_IPV4_UDP | \
- ETH_RSS_NONFRAG_IPV6_UDP | \
- ETH_RSS_NONFRAG_IPV4_TCP | \
- ETH_RSS_NONFRAG_IPV6_TCP | \
- ETH_RSS_NONFRAG_IPV4_SCTP | \
- ETH_RSS_NONFRAG_IPV6_SCTP)
+ RTE_ETH_RSS_IPV4 | \
+ RTE_ETH_RSS_IPV6 | \
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
ret = ice_rem_vsi_rss_cfg(hw, vsi->idx);
if (ret)
@@ -3010,7 +3010,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
cfg.symm = 0;
cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
/* Configure RSS for IPv4 with src/dst addr as input set */
- if (rss_hf & ETH_RSS_IPV4) {
+ if (rss_hf & RTE_ETH_RSS_IPV4) {
cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
cfg.hash_flds = ICE_FLOW_HASH_IPV4;
ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
@@ -3020,7 +3020,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
}
/* Configure RSS for IPv6 with src/dst addr as input set */
- if (rss_hf & ETH_RSS_IPV6) {
+ if (rss_hf & RTE_ETH_RSS_IPV6) {
cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
cfg.hash_flds = ICE_FLOW_HASH_IPV6;
ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
@@ -3030,7 +3030,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
}
/* Configure RSS for udp4 with src/dst addr and port as input set */
- if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) {
cfg.addl_hdrs = ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4 |
ICE_FLOW_SEG_HDR_IPV_OTHER;
cfg.hash_flds = ICE_HASH_UDP_IPV4;
@@ -3041,7 +3041,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
}
/* Configure RSS for udp6 with src/dst addr and port as input set */
- if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) {
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) {
cfg.addl_hdrs = ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6 |
ICE_FLOW_SEG_HDR_IPV_OTHER;
cfg.hash_flds = ICE_HASH_UDP_IPV6;
@@ -3052,7 +3052,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
}
/* Configure RSS for tcp4 with src/dst addr and port as input set */
- if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) {
cfg.addl_hdrs = ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4 |
ICE_FLOW_SEG_HDR_IPV_OTHER;
cfg.hash_flds = ICE_HASH_TCP_IPV4;
@@ -3063,7 +3063,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
}
/* Configure RSS for tcp6 with src/dst addr and port as input set */
- if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) {
cfg.addl_hdrs = ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6 |
ICE_FLOW_SEG_HDR_IPV_OTHER;
cfg.hash_flds = ICE_HASH_TCP_IPV6;
@@ -3074,7 +3074,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
}
/* Configure RSS for sctp4 with src/dst addr and port as input set */
- if (rss_hf & ETH_RSS_NONFRAG_IPV4_SCTP) {
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_SCTP) {
cfg.addl_hdrs = ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4 |
ICE_FLOW_SEG_HDR_IPV_OTHER;
cfg.hash_flds = ICE_HASH_SCTP_IPV4;
@@ -3085,7 +3085,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
}
/* Configure RSS for sctp6 with src/dst addr and port as input set */
- if (rss_hf & ETH_RSS_NONFRAG_IPV6_SCTP) {
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_SCTP) {
cfg.addl_hdrs = ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6 |
ICE_FLOW_SEG_HDR_IPV_OTHER;
cfg.hash_flds = ICE_HASH_SCTP_IPV6;
@@ -3095,7 +3095,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
__func__, ret);
}
- if (rss_hf & ETH_RSS_IPV4) {
+ if (rss_hf & RTE_ETH_RSS_IPV4) {
cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_IPV4 |
ICE_FLOW_SEG_HDR_IPV_OTHER;
cfg.hash_flds = ICE_FLOW_HASH_IPV4;
@@ -3105,7 +3105,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
__func__, ret);
}
- if (rss_hf & ETH_RSS_IPV6) {
+ if (rss_hf & RTE_ETH_RSS_IPV6) {
cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_IPV6 |
ICE_FLOW_SEG_HDR_IPV_OTHER;
cfg.hash_flds = ICE_FLOW_HASH_IPV6;
@@ -3115,7 +3115,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
__func__, ret);
}
- if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) {
cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_UDP |
ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
cfg.hash_flds = ICE_HASH_UDP_IPV4;
@@ -3125,7 +3125,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
__func__, ret);
}
- if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) {
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) {
cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_UDP |
ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
cfg.hash_flds = ICE_HASH_UDP_IPV6;
@@ -3135,7 +3135,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
__func__, ret);
}
- if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) {
cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_TCP |
ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
cfg.hash_flds = ICE_HASH_TCP_IPV4;
@@ -3145,7 +3145,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
__func__, ret);
}
- if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) {
cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_TCP |
ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
cfg.hash_flds = ICE_HASH_TCP_IPV6;
@@ -3288,8 +3288,8 @@ ice_dev_configure(struct rte_eth_dev *dev)
ad->rx_bulk_alloc_allowed = true;
ad->tx_simple_allowed = true;
- if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
- dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+ if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+ dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
if (dev->data->nb_rx_queues) {
ret = ice_init_rss(pf);
@@ -3569,8 +3569,8 @@ ice_dev_start(struct rte_eth_dev *dev)
ice_set_rx_function(dev);
ice_set_tx_function(dev);
- mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
- ETH_VLAN_EXTEND_MASK;
+ mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+ RTE_ETH_VLAN_EXTEND_MASK;
ret = ice_vlan_offload_set(dev, mask);
if (ret) {
PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
@@ -3682,40 +3682,40 @@ ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->min_mtu = RTE_ETHER_MIN_MTU;
dev_info->rx_offload_capa =
- DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_KEEP_CRC |
- DEV_RX_OFFLOAD_SCATTER |
- DEV_RX_OFFLOAD_VLAN_FILTER;
+ RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+ RTE_ETH_RX_OFFLOAD_KEEP_CRC |
+ RTE_ETH_RX_OFFLOAD_SCATTER |
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
dev_info->tx_offload_capa =
- DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_TCP_TSO |
- DEV_TX_OFFLOAD_MULTI_SEGS |
- DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+ RTE_ETH_TX_OFFLOAD_TCP_TSO |
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+ RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
dev_info->flow_type_rss_offloads = 0;
if (!is_safe_mode) {
dev_info->rx_offload_capa |=
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_QINQ_STRIP |
- DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_VLAN_EXTEND |
- DEV_RX_OFFLOAD_RSS_HASH |
- DEV_RX_OFFLOAD_TIMESTAMP;
+ RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+ RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
+ RTE_ETH_RX_OFFLOAD_RSS_HASH |
+ RTE_ETH_RX_OFFLOAD_TIMESTAMP;
dev_info->tx_offload_capa |=
- DEV_TX_OFFLOAD_QINQ_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_SCTP_CKSUM |
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
+ RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM;
dev_info->flow_type_rss_offloads |= ICE_RSS_OFFLOAD_ALL;
}
dev_info->rx_queue_offload_capa = 0;
- dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ dev_info->tx_queue_offload_capa = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
dev_info->reta_size = pf->hash_lut_size;
dev_info->hash_key_size = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
@@ -3754,24 +3754,24 @@ ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
.nb_align = ICE_ALIGN_RING_DESC,
};
- dev_info->speed_capa = ETH_LINK_SPEED_10M |
- ETH_LINK_SPEED_100M |
- ETH_LINK_SPEED_1G |
- ETH_LINK_SPEED_2_5G |
- ETH_LINK_SPEED_5G |
- ETH_LINK_SPEED_10G |
- ETH_LINK_SPEED_20G |
- ETH_LINK_SPEED_25G;
+ dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M |
+ RTE_ETH_LINK_SPEED_100M |
+ RTE_ETH_LINK_SPEED_1G |
+ RTE_ETH_LINK_SPEED_2_5G |
+ RTE_ETH_LINK_SPEED_5G |
+ RTE_ETH_LINK_SPEED_10G |
+ RTE_ETH_LINK_SPEED_20G |
+ RTE_ETH_LINK_SPEED_25G;
phy_type_low = hw->port_info->phy.phy_type_low;
phy_type_high = hw->port_info->phy.phy_type_high;
if (ICE_PHY_TYPE_SUPPORT_50G(phy_type_low))
- dev_info->speed_capa |= ETH_LINK_SPEED_50G;
+ dev_info->speed_capa |= RTE_ETH_LINK_SPEED_50G;
if (ICE_PHY_TYPE_SUPPORT_100G_LOW(phy_type_low) ||
ICE_PHY_TYPE_SUPPORT_100G_HIGH(phy_type_high))
- dev_info->speed_capa |= ETH_LINK_SPEED_100G;
+ dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100G;
dev_info->nb_rx_queues = dev->data->nb_rx_queues;
dev_info->nb_tx_queues = dev->data->nb_tx_queues;
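The RTE_ETH_LINK_SPEED_* bits advertised in speed_capa are the same ones an application sets in dev_conf.link_speeds; a sketch pinning the link to a fixed 10G (queue counts are placeholders, and the port must actually support the speed):

    #include <rte_ethdev.h>

    static int
    force_10g(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
    {
            struct rte_eth_conf conf = {
                    /* Disable autonegotiation and pin the link to 10 Gb/s. */
                    .link_speeds = RTE_ETH_LINK_SPEED_FIXED |
                                   RTE_ETH_LINK_SPEED_10G,
            };

            return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
    }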
@@ -3836,8 +3836,8 @@ ice_link_update(struct rte_eth_dev *dev, int wait_to_complete)
status = ice_aq_get_link_info(hw->port_info, enable_lse,
&link_status, NULL);
if (status != ICE_SUCCESS) {
- link.link_speed = ETH_SPEED_NUM_100M;
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_speed = RTE_ETH_SPEED_NUM_100M;
+ link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
PMD_DRV_LOG(ERR, "Failed to get link info");
goto out;
}
@@ -3853,55 +3853,55 @@ ice_link_update(struct rte_eth_dev *dev, int wait_to_complete)
goto out;
/* Full-duplex operation at all supported speeds */
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
/* Parse the link status */
switch (link_status.link_speed) {
case ICE_AQ_LINK_SPEED_10MB:
- link.link_speed = ETH_SPEED_NUM_10M;
+ link.link_speed = RTE_ETH_SPEED_NUM_10M;
break;
case ICE_AQ_LINK_SPEED_100MB:
- link.link_speed = ETH_SPEED_NUM_100M;
+ link.link_speed = RTE_ETH_SPEED_NUM_100M;
break;
case ICE_AQ_LINK_SPEED_1000MB:
- link.link_speed = ETH_SPEED_NUM_1G;
+ link.link_speed = RTE_ETH_SPEED_NUM_1G;
break;
case ICE_AQ_LINK_SPEED_2500MB:
- link.link_speed = ETH_SPEED_NUM_2_5G;
+ link.link_speed = RTE_ETH_SPEED_NUM_2_5G;
break;
case ICE_AQ_LINK_SPEED_5GB:
- link.link_speed = ETH_SPEED_NUM_5G;
+ link.link_speed = RTE_ETH_SPEED_NUM_5G;
break;
case ICE_AQ_LINK_SPEED_10GB:
- link.link_speed = ETH_SPEED_NUM_10G;
+ link.link_speed = RTE_ETH_SPEED_NUM_10G;
break;
case ICE_AQ_LINK_SPEED_20GB:
- link.link_speed = ETH_SPEED_NUM_20G;
+ link.link_speed = RTE_ETH_SPEED_NUM_20G;
break;
case ICE_AQ_LINK_SPEED_25GB:
- link.link_speed = ETH_SPEED_NUM_25G;
+ link.link_speed = RTE_ETH_SPEED_NUM_25G;
break;
case ICE_AQ_LINK_SPEED_40GB:
- link.link_speed = ETH_SPEED_NUM_40G;
+ link.link_speed = RTE_ETH_SPEED_NUM_40G;
break;
case ICE_AQ_LINK_SPEED_50GB:
- link.link_speed = ETH_SPEED_NUM_50G;
+ link.link_speed = RTE_ETH_SPEED_NUM_50G;
break;
case ICE_AQ_LINK_SPEED_100GB:
- link.link_speed = ETH_SPEED_NUM_100G;
+ link.link_speed = RTE_ETH_SPEED_NUM_100G;
break;
case ICE_AQ_LINK_SPEED_UNKNOWN:
PMD_DRV_LOG(ERR, "Unknown link speed");
- link.link_speed = ETH_SPEED_NUM_UNKNOWN;
+ link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
break;
default:
PMD_DRV_LOG(ERR, "None link speed");
- link.link_speed = ETH_SPEED_NUM_NONE;
+ link.link_speed = RTE_ETH_SPEED_NUM_NONE;
break;
}
link.link_autoneg = !(dev->data->dev_conf.link_speeds &
- ETH_LINK_SPEED_FIXED);
+ RTE_ETH_LINK_SPEED_FIXED);
out:
ice_atomic_write_link_status(dev, &link);
@@ -4377,15 +4377,15 @@ ice_vlan_offload_set(struct rte_eth_dev *dev, int mask)
struct rte_eth_rxmode *rxmode;
rxmode = &dev->data->dev_conf.rxmode;
- if (mask & ETH_VLAN_FILTER_MASK) {
- if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+ if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
ice_vsi_config_vlan_filter(vsi, true);
else
ice_vsi_config_vlan_filter(vsi, false);
}
- if (mask & ETH_VLAN_STRIP_MASK) {
- if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
ice_vsi_config_vlan_stripping(vsi, true);
else
ice_vsi_config_vlan_stripping(vsi, false);
@@ -4500,8 +4500,8 @@ ice_rss_reta_update(struct rte_eth_dev *dev,
goto out;
for (i = 0; i < reta_size; i++) {
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
if (reta_conf[idx].mask & (1ULL << shift))
lut[i] = reta_conf[idx].reta[shift];
}
@@ -4550,8 +4550,8 @@ ice_rss_reta_query(struct rte_eth_dev *dev,
goto out;
for (i = 0; i < reta_size; i++) {
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
if (reta_conf[idx].mask & (1ULL << shift))
reta_conf[idx].reta[shift] = lut[i];
}
@@ -5460,7 +5460,7 @@ ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
return -EINVAL;
switch (udp_tunnel->prot_type) {
- case RTE_TUNNEL_TYPE_VXLAN:
+ case RTE_ETH_TUNNEL_TYPE_VXLAN:
ret = ice_create_tunnel(hw, TNL_VXLAN, udp_tunnel->udp_port);
break;
default:
@@ -5484,7 +5484,7 @@ ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
return -EINVAL;
switch (udp_tunnel->prot_type) {
- case RTE_TUNNEL_TYPE_VXLAN:
+ case RTE_ETH_TUNNEL_TYPE_VXLAN:
ret = ice_destroy_tunnel(hw, udp_tunnel->udp_port, 0);
break;
default:
@@ -5505,7 +5505,7 @@ ice_timesync_enable(struct rte_eth_dev *dev)
int ret;
if (dev->data->dev_started && !(dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_TIMESTAMP)) {
+ RTE_ETH_RX_OFFLOAD_TIMESTAMP)) {
PMD_DRV_LOG(ERR, "Rx timestamp offload not configured");
return -1;
}
@@ -117,19 +117,19 @@
ICE_FLAG_VF_MAC_BY_PF)
#define ICE_RSS_OFFLOAD_ALL ( \
- ETH_RSS_IPV4 | \
- ETH_RSS_FRAG_IPV4 | \
- ETH_RSS_NONFRAG_IPV4_TCP | \
- ETH_RSS_NONFRAG_IPV4_UDP | \
- ETH_RSS_NONFRAG_IPV4_SCTP | \
- ETH_RSS_NONFRAG_IPV4_OTHER | \
- ETH_RSS_IPV6 | \
- ETH_RSS_FRAG_IPV6 | \
- ETH_RSS_NONFRAG_IPV6_TCP | \
- ETH_RSS_NONFRAG_IPV6_UDP | \
- ETH_RSS_NONFRAG_IPV6_SCTP | \
- ETH_RSS_NONFRAG_IPV6_OTHER | \
- ETH_RSS_L2_PAYLOAD)
+ RTE_ETH_RSS_IPV4 | \
+ RTE_ETH_RSS_FRAG_IPV4 | \
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
+ RTE_ETH_RSS_IPV6 | \
+ RTE_ETH_RSS_FRAG_IPV6 | \
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
+ RTE_ETH_RSS_L2_PAYLOAD)
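ICE_RSS_OFFLOAD_ALL is what the driver advertises as flow_type_rss_offloads; the usual companion is a guard that rejects anything outside it. A one-line sketch (the helper name is hypothetical):

#include <errno.h>
#include <stdint.h>

/* Reject hash types the port cannot compute. */
static int
check_rss_hf(uint64_t requested)
{
	return (requested & ~ICE_RSS_OFFLOAD_ALL) ? -ENOTSUP : 0;
}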
/**
* The overhead from MTU to max frame size.
@@ -39,27 +39,27 @@
#define ICE_IPV4_PROT BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)
#define ICE_IPV6_PROT BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)
-#define VALID_RSS_IPV4_L4 (ETH_RSS_NONFRAG_IPV4_UDP | \
- ETH_RSS_NONFRAG_IPV4_TCP | \
- ETH_RSS_NONFRAG_IPV4_SCTP)
+#define VALID_RSS_IPV4_L4 (RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_SCTP)
-#define VALID_RSS_IPV6_L4 (ETH_RSS_NONFRAG_IPV6_UDP | \
- ETH_RSS_NONFRAG_IPV6_TCP | \
- ETH_RSS_NONFRAG_IPV6_SCTP)
+#define VALID_RSS_IPV6_L4 (RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
-#define VALID_RSS_IPV4 (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | \
+#define VALID_RSS_IPV4 (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | \
VALID_RSS_IPV4_L4)
-#define VALID_RSS_IPV6 (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | \
+#define VALID_RSS_IPV6 (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | \
VALID_RSS_IPV6_L4)
#define VALID_RSS_L3 (VALID_RSS_IPV4 | VALID_RSS_IPV6)
#define VALID_RSS_L4 (VALID_RSS_IPV4_L4 | VALID_RSS_IPV6_L4)
-#define VALID_RSS_ATTR (ETH_RSS_L3_SRC_ONLY | \
- ETH_RSS_L3_DST_ONLY | \
- ETH_RSS_L4_SRC_ONLY | \
- ETH_RSS_L4_DST_ONLY | \
- ETH_RSS_L2_SRC_ONLY | \
- ETH_RSS_L2_DST_ONLY | \
+#define VALID_RSS_ATTR (RTE_ETH_RSS_L3_SRC_ONLY | \
+ RTE_ETH_RSS_L3_DST_ONLY | \
+ RTE_ETH_RSS_L4_SRC_ONLY | \
+ RTE_ETH_RSS_L4_DST_ONLY | \
+ RTE_ETH_RSS_L2_SRC_ONLY | \
+ RTE_ETH_RSS_L2_DST_ONLY | \
RTE_ETH_RSS_L3_PRE32 | \
RTE_ETH_RSS_L3_PRE48 | \
RTE_ETH_RSS_L3_PRE64)
@@ -373,87 +373,87 @@ struct ice_rss_hash_cfg eth_tmplt = {
};
/* IPv4 */
-#define ICE_RSS_TYPE_ETH_IPV4 (ETH_RSS_ETH | ETH_RSS_IPV4 | \
- ETH_RSS_FRAG_IPV4 | \
- ETH_RSS_IPV4_CHKSUM)
+#define ICE_RSS_TYPE_ETH_IPV4 (RTE_ETH_RSS_ETH | RTE_ETH_RSS_IPV4 | \
+ RTE_ETH_RSS_FRAG_IPV4 | \
+ RTE_ETH_RSS_IPV4_CHKSUM)
#define ICE_RSS_TYPE_ETH_IPV4_UDP (ICE_RSS_TYPE_ETH_IPV4 | \
- ETH_RSS_NONFRAG_IPV4_UDP | \
- ETH_RSS_L4_CHKSUM)
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+ RTE_ETH_RSS_L4_CHKSUM)
#define ICE_RSS_TYPE_ETH_IPV4_TCP (ICE_RSS_TYPE_ETH_IPV4 | \
- ETH_RSS_NONFRAG_IPV4_TCP | \
- ETH_RSS_L4_CHKSUM)
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+ RTE_ETH_RSS_L4_CHKSUM)
#define ICE_RSS_TYPE_ETH_IPV4_SCTP (ICE_RSS_TYPE_ETH_IPV4 | \
- ETH_RSS_NONFRAG_IPV4_SCTP | \
- ETH_RSS_L4_CHKSUM)
-#define ICE_RSS_TYPE_IPV4 ETH_RSS_IPV4
-#define ICE_RSS_TYPE_IPV4_UDP (ETH_RSS_IPV4 | \
- ETH_RSS_NONFRAG_IPV4_UDP)
-#define ICE_RSS_TYPE_IPV4_TCP (ETH_RSS_IPV4 | \
- ETH_RSS_NONFRAG_IPV4_TCP)
-#define ICE_RSS_TYPE_IPV4_SCTP (ETH_RSS_IPV4 | \
- ETH_RSS_NONFRAG_IPV4_SCTP)
+ RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+ RTE_ETH_RSS_L4_CHKSUM)
+#define ICE_RSS_TYPE_IPV4 RTE_ETH_RSS_IPV4
+#define ICE_RSS_TYPE_IPV4_UDP (RTE_ETH_RSS_IPV4 | \
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP)
+#define ICE_RSS_TYPE_IPV4_TCP (RTE_ETH_RSS_IPV4 | \
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP)
+#define ICE_RSS_TYPE_IPV4_SCTP (RTE_ETH_RSS_IPV4 | \
+ RTE_ETH_RSS_NONFRAG_IPV4_SCTP)
/* IPv6 */
-#define ICE_RSS_TYPE_ETH_IPV6 (ETH_RSS_ETH | ETH_RSS_IPV6)
-#define ICE_RSS_TYPE_ETH_IPV6_FRAG (ETH_RSS_ETH | ETH_RSS_IPV6 | \
- ETH_RSS_FRAG_IPV6)
+#define ICE_RSS_TYPE_ETH_IPV6 (RTE_ETH_RSS_ETH | RTE_ETH_RSS_IPV6)
+#define ICE_RSS_TYPE_ETH_IPV6_FRAG (RTE_ETH_RSS_ETH | RTE_ETH_RSS_IPV6 | \
+ RTE_ETH_RSS_FRAG_IPV6)
#define ICE_RSS_TYPE_ETH_IPV6_UDP (ICE_RSS_TYPE_ETH_IPV6 | \
- ETH_RSS_NONFRAG_IPV6_UDP | \
- ETH_RSS_L4_CHKSUM)
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+ RTE_ETH_RSS_L4_CHKSUM)
#define ICE_RSS_TYPE_ETH_IPV6_TCP (ICE_RSS_TYPE_ETH_IPV6 | \
- ETH_RSS_NONFRAG_IPV6_TCP | \
- ETH_RSS_L4_CHKSUM)
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+ RTE_ETH_RSS_L4_CHKSUM)
#define ICE_RSS_TYPE_ETH_IPV6_SCTP (ICE_RSS_TYPE_ETH_IPV6 | \
- ETH_RSS_NONFRAG_IPV6_SCTP | \
- ETH_RSS_L4_CHKSUM)
-#define ICE_RSS_TYPE_IPV6 ETH_RSS_IPV6
-#define ICE_RSS_TYPE_IPV6_UDP (ETH_RSS_IPV6 | \
- ETH_RSS_NONFRAG_IPV6_UDP)
-#define ICE_RSS_TYPE_IPV6_TCP (ETH_RSS_IPV6 | \
- ETH_RSS_NONFRAG_IPV6_TCP)
-#define ICE_RSS_TYPE_IPV6_SCTP (ETH_RSS_IPV6 | \
- ETH_RSS_NONFRAG_IPV6_SCTP)
+ RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
+ RTE_ETH_RSS_L4_CHKSUM)
+#define ICE_RSS_TYPE_IPV6 RTE_ETH_RSS_IPV6
+#define ICE_RSS_TYPE_IPV6_UDP (RTE_ETH_RSS_IPV6 | \
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP)
+#define ICE_RSS_TYPE_IPV6_TCP (RTE_ETH_RSS_IPV6 | \
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP)
+#define ICE_RSS_TYPE_IPV6_SCTP (RTE_ETH_RSS_IPV6 | \
+ RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
/* VLAN IPV4 */
#define ICE_RSS_TYPE_VLAN_IPV4 (ICE_RSS_TYPE_IPV4 | \
- ETH_RSS_S_VLAN | ETH_RSS_C_VLAN | \
- ETH_RSS_FRAG_IPV4)
+ RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN | \
+ RTE_ETH_RSS_FRAG_IPV4)
#define ICE_RSS_TYPE_VLAN_IPV4_UDP (ICE_RSS_TYPE_IPV4_UDP | \
- ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+ RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
#define ICE_RSS_TYPE_VLAN_IPV4_TCP (ICE_RSS_TYPE_IPV4_TCP | \
- ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+ RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
#define ICE_RSS_TYPE_VLAN_IPV4_SCTP (ICE_RSS_TYPE_IPV4_SCTP | \
- ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+ RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
/* VLAN IPv6 */
#define ICE_RSS_TYPE_VLAN_IPV6 (ICE_RSS_TYPE_IPV6 | \
- ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+ RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
#define ICE_RSS_TYPE_VLAN_IPV6_FRAG (ICE_RSS_TYPE_IPV6 | \
- ETH_RSS_S_VLAN | ETH_RSS_C_VLAN | \
- ETH_RSS_FRAG_IPV6)
+ RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN | \
+ RTE_ETH_RSS_FRAG_IPV6)
#define ICE_RSS_TYPE_VLAN_IPV6_UDP (ICE_RSS_TYPE_IPV6_UDP | \
- ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+ RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
#define ICE_RSS_TYPE_VLAN_IPV6_TCP (ICE_RSS_TYPE_IPV6_TCP | \
- ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+ RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
#define ICE_RSS_TYPE_VLAN_IPV6_SCTP (ICE_RSS_TYPE_IPV6_SCTP | \
- ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+ RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
/* GTPU IPv4 */
#define ICE_RSS_TYPE_GTPU_IPV4 (ICE_RSS_TYPE_IPV4 | \
- ETH_RSS_GTPU)
+ RTE_ETH_RSS_GTPU)
#define ICE_RSS_TYPE_GTPU_IPV4_UDP (ICE_RSS_TYPE_IPV4_UDP | \
- ETH_RSS_GTPU)
+ RTE_ETH_RSS_GTPU)
#define ICE_RSS_TYPE_GTPU_IPV4_TCP (ICE_RSS_TYPE_IPV4_TCP | \
- ETH_RSS_GTPU)
+ RTE_ETH_RSS_GTPU)
/* GTPU IPv6 */
#define ICE_RSS_TYPE_GTPU_IPV6 (ICE_RSS_TYPE_IPV6 | \
- ETH_RSS_GTPU)
+ RTE_ETH_RSS_GTPU)
#define ICE_RSS_TYPE_GTPU_IPV6_UDP (ICE_RSS_TYPE_IPV6_UDP | \
- ETH_RSS_GTPU)
+ RTE_ETH_RSS_GTPU)
#define ICE_RSS_TYPE_GTPU_IPV6_TCP (ICE_RSS_TYPE_IPV6_TCP | \
- ETH_RSS_GTPU)
+ RTE_ETH_RSS_GTPU)
/* PPPOE */
-#define ICE_RSS_TYPE_PPPOE (ETH_RSS_ETH | ETH_RSS_PPPOE)
+#define ICE_RSS_TYPE_PPPOE (RTE_ETH_RSS_ETH | RTE_ETH_RSS_PPPOE)
/* PPPOE IPv4 */
#define ICE_RSS_TYPE_PPPOE_IPV4 (ICE_RSS_TYPE_IPV4 | \
@@ -472,17 +472,17 @@ struct ice_rss_hash_cfg eth_tmplt = {
ICE_RSS_TYPE_PPPOE)
/* ESP, AH, L2TPV3 and PFCP */
-#define ICE_RSS_TYPE_IPV4_ESP (ETH_RSS_ESP | ETH_RSS_IPV4)
-#define ICE_RSS_TYPE_IPV6_ESP (ETH_RSS_ESP | ETH_RSS_IPV6)
-#define ICE_RSS_TYPE_IPV4_AH (ETH_RSS_AH | ETH_RSS_IPV4)
-#define ICE_RSS_TYPE_IPV6_AH (ETH_RSS_AH | ETH_RSS_IPV6)
-#define ICE_RSS_TYPE_IPV4_L2TPV3 (ETH_RSS_L2TPV3 | ETH_RSS_IPV4)
-#define ICE_RSS_TYPE_IPV6_L2TPV3 (ETH_RSS_L2TPV3 | ETH_RSS_IPV6)
-#define ICE_RSS_TYPE_IPV4_PFCP (ETH_RSS_PFCP | ETH_RSS_IPV4)
-#define ICE_RSS_TYPE_IPV6_PFCP (ETH_RSS_PFCP | ETH_RSS_IPV6)
+#define ICE_RSS_TYPE_IPV4_ESP (RTE_ETH_RSS_ESP | RTE_ETH_RSS_IPV4)
+#define ICE_RSS_TYPE_IPV6_ESP (RTE_ETH_RSS_ESP | RTE_ETH_RSS_IPV6)
+#define ICE_RSS_TYPE_IPV4_AH (RTE_ETH_RSS_AH | RTE_ETH_RSS_IPV4)
+#define ICE_RSS_TYPE_IPV6_AH (RTE_ETH_RSS_AH | RTE_ETH_RSS_IPV6)
+#define ICE_RSS_TYPE_IPV4_L2TPV3 (RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_IPV4)
+#define ICE_RSS_TYPE_IPV6_L2TPV3 (RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_IPV6)
+#define ICE_RSS_TYPE_IPV4_PFCP (RTE_ETH_RSS_PFCP | RTE_ETH_RSS_IPV4)
+#define ICE_RSS_TYPE_IPV6_PFCP (RTE_ETH_RSS_PFCP | RTE_ETH_RSS_IPV6)
/* MAC */
-#define ICE_RSS_TYPE_ETH ETH_RSS_ETH
+#define ICE_RSS_TYPE_ETH RTE_ETH_RSS_ETH
/**
* Supported pattern for hash.
@@ -647,86 +647,86 @@ ice_refine_hash_cfg_l234(struct ice_rss_hash_cfg *hash_cfg,
uint64_t *hash_flds = &hash_cfg->hash_flds;
if (*addl_hdrs & ICE_FLOW_SEG_HDR_ETH) {
- if (!(rss_type & ETH_RSS_ETH))
+ if (!(rss_type & RTE_ETH_RSS_ETH))
*hash_flds &= ~ICE_FLOW_HASH_ETH;
- if (rss_type & ETH_RSS_L2_SRC_ONLY)
+ if (rss_type & RTE_ETH_RSS_L2_SRC_ONLY)
*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_DA));
- else if (rss_type & ETH_RSS_L2_DST_ONLY)
+ else if (rss_type & RTE_ETH_RSS_L2_DST_ONLY)
*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_SA));
*addl_hdrs &= ~ICE_FLOW_SEG_HDR_ETH;
}
if (*addl_hdrs & ICE_FLOW_SEG_HDR_ETH_NON_IP) {
- if (rss_type & ETH_RSS_ETH)
+ if (rss_type & RTE_ETH_RSS_ETH)
*hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_TYPE);
}
if (*addl_hdrs & ICE_FLOW_SEG_HDR_VLAN) {
- if (rss_type & ETH_RSS_C_VLAN)
+ if (rss_type & RTE_ETH_RSS_C_VLAN)
*hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_C_VLAN);
- else if (rss_type & ETH_RSS_S_VLAN)
+ else if (rss_type & RTE_ETH_RSS_S_VLAN)
*hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_S_VLAN);
}
if (*addl_hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
- if (!(rss_type & ETH_RSS_PPPOE))
+ if (!(rss_type & RTE_ETH_RSS_PPPOE))
*hash_flds &= ~ICE_FLOW_HASH_PPPOE_SESS_ID;
}
if (*addl_hdrs & ICE_FLOW_SEG_HDR_IPV4) {
if (rss_type &
- (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
- ETH_RSS_NONFRAG_IPV4_UDP |
- ETH_RSS_NONFRAG_IPV4_TCP |
- ETH_RSS_NONFRAG_IPV4_SCTP)) {
- if (rss_type & ETH_RSS_FRAG_IPV4) {
+ (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+ RTE_ETH_RSS_NONFRAG_IPV4_SCTP)) {
+ if (rss_type & RTE_ETH_RSS_FRAG_IPV4) {
*addl_hdrs |= ICE_FLOW_SEG_HDR_IPV_FRAG;
*addl_hdrs &= ~(ICE_FLOW_SEG_HDR_IPV_OTHER);
*hash_flds |=
BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_ID);
}
- if (rss_type & ETH_RSS_L3_SRC_ONLY)
+ if (rss_type & RTE_ETH_RSS_L3_SRC_ONLY)
*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA));
- else if (rss_type & ETH_RSS_L3_DST_ONLY)
+ else if (rss_type & RTE_ETH_RSS_L3_DST_ONLY)
*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA));
else if (rss_type &
- (ETH_RSS_L4_SRC_ONLY |
- ETH_RSS_L4_DST_ONLY))
+ (RTE_ETH_RSS_L4_SRC_ONLY |
+ RTE_ETH_RSS_L4_DST_ONLY))
*hash_flds &= ~ICE_FLOW_HASH_IPV4;
} else {
*hash_flds &= ~ICE_FLOW_HASH_IPV4;
}
- if (rss_type & ETH_RSS_IPV4_CHKSUM)
+ if (rss_type & RTE_ETH_RSS_IPV4_CHKSUM)
*hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM);
}
if (*addl_hdrs & ICE_FLOW_SEG_HDR_IPV6) {
if (rss_type &
- (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
- ETH_RSS_NONFRAG_IPV6_UDP |
- ETH_RSS_NONFRAG_IPV6_TCP |
- ETH_RSS_NONFRAG_IPV6_SCTP)) {
- if (rss_type & ETH_RSS_FRAG_IPV6)
+ (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+ RTE_ETH_RSS_NONFRAG_IPV6_SCTP)) {
+ if (rss_type & RTE_ETH_RSS_FRAG_IPV6)
*hash_flds |=
BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_ID);
- if (rss_type & ETH_RSS_L3_SRC_ONLY)
+ if (rss_type & RTE_ETH_RSS_L3_SRC_ONLY)
*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA));
- else if (rss_type & ETH_RSS_L3_DST_ONLY)
+ else if (rss_type & RTE_ETH_RSS_L3_DST_ONLY)
*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA));
else if (rss_type &
- (ETH_RSS_L4_SRC_ONLY |
- ETH_RSS_L4_DST_ONLY))
+ (RTE_ETH_RSS_L4_SRC_ONLY |
+ RTE_ETH_RSS_L4_DST_ONLY))
*hash_flds &= ~ICE_FLOW_HASH_IPV6;
} else {
*hash_flds &= ~ICE_FLOW_HASH_IPV6;
}
if (rss_type & RTE_ETH_RSS_L3_PRE32) {
- if (rss_type & ETH_RSS_L3_SRC_ONLY) {
+ if (rss_type & RTE_ETH_RSS_L3_SRC_ONLY) {
*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA));
*hash_flds |= (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA));
- } else if (rss_type & ETH_RSS_L3_DST_ONLY) {
+ } else if (rss_type & RTE_ETH_RSS_L3_DST_ONLY) {
*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA));
*hash_flds |= (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA));
} else {
@@ -735,10 +735,10 @@ ice_refine_hash_cfg_l234(struct ice_rss_hash_cfg *hash_cfg,
}
}
if (rss_type & RTE_ETH_RSS_L3_PRE48) {
- if (rss_type & ETH_RSS_L3_SRC_ONLY) {
+ if (rss_type & RTE_ETH_RSS_L3_SRC_ONLY) {
*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA));
*hash_flds |= (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA));
- } else if (rss_type & ETH_RSS_L3_DST_ONLY) {
+ } else if (rss_type & RTE_ETH_RSS_L3_DST_ONLY) {
*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA));
*hash_flds |= (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA));
} else {
@@ -747,10 +747,10 @@ ice_refine_hash_cfg_l234(struct ice_rss_hash_cfg *hash_cfg,
}
}
if (rss_type & RTE_ETH_RSS_L3_PRE64) {
- if (rss_type & ETH_RSS_L3_SRC_ONLY) {
+ if (rss_type & RTE_ETH_RSS_L3_SRC_ONLY) {
*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA));
*hash_flds |= (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA));
- } else if (rss_type & ETH_RSS_L3_DST_ONLY) {
+ } else if (rss_type & RTE_ETH_RSS_L3_DST_ONLY) {
*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA));
*hash_flds |= (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA));
} else {
@@ -762,81 +762,81 @@ ice_refine_hash_cfg_l234(struct ice_rss_hash_cfg *hash_cfg,
if (*addl_hdrs & ICE_FLOW_SEG_HDR_UDP) {
if (rss_type &
- (ETH_RSS_NONFRAG_IPV4_UDP |
- ETH_RSS_NONFRAG_IPV6_UDP)) {
- if (rss_type & ETH_RSS_L4_SRC_ONLY)
+ (RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP)) {
+ if (rss_type & RTE_ETH_RSS_L4_SRC_ONLY)
*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT));
- else if (rss_type & ETH_RSS_L4_DST_ONLY)
+ else if (rss_type & RTE_ETH_RSS_L4_DST_ONLY)
*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT));
else if (rss_type &
- (ETH_RSS_L3_SRC_ONLY |
- ETH_RSS_L3_DST_ONLY))
+ (RTE_ETH_RSS_L3_SRC_ONLY |
+ RTE_ETH_RSS_L3_DST_ONLY))
*hash_flds &= ~ICE_FLOW_HASH_UDP_PORT;
} else {
*hash_flds &= ~ICE_FLOW_HASH_UDP_PORT;
}
- if (rss_type & ETH_RSS_L4_CHKSUM)
+ if (rss_type & RTE_ETH_RSS_L4_CHKSUM)
*hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_CHKSUM);
}
if (*addl_hdrs & ICE_FLOW_SEG_HDR_TCP) {
if (rss_type &
- (ETH_RSS_NONFRAG_IPV4_TCP |
- ETH_RSS_NONFRAG_IPV6_TCP)) {
- if (rss_type & ETH_RSS_L4_SRC_ONLY)
+ (RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP)) {
+ if (rss_type & RTE_ETH_RSS_L4_SRC_ONLY)
*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT));
- else if (rss_type & ETH_RSS_L4_DST_ONLY)
+ else if (rss_type & RTE_ETH_RSS_L4_DST_ONLY)
*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT));
else if (rss_type &
- (ETH_RSS_L3_SRC_ONLY |
- ETH_RSS_L3_DST_ONLY))
+ (RTE_ETH_RSS_L3_SRC_ONLY |
+ RTE_ETH_RSS_L3_DST_ONLY))
*hash_flds &= ~ICE_FLOW_HASH_TCP_PORT;
} else {
*hash_flds &= ~ICE_FLOW_HASH_TCP_PORT;
}
- if (rss_type & ETH_RSS_L4_CHKSUM)
+ if (rss_type & RTE_ETH_RSS_L4_CHKSUM)
*hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_CHKSUM);
}
if (*addl_hdrs & ICE_FLOW_SEG_HDR_SCTP) {
if (rss_type &
- (ETH_RSS_NONFRAG_IPV4_SCTP |
- ETH_RSS_NONFRAG_IPV6_SCTP)) {
- if (rss_type & ETH_RSS_L4_SRC_ONLY)
+ (RTE_ETH_RSS_NONFRAG_IPV4_SCTP |
+ RTE_ETH_RSS_NONFRAG_IPV6_SCTP)) {
+ if (rss_type & RTE_ETH_RSS_L4_SRC_ONLY)
*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT));
- else if (rss_type & ETH_RSS_L4_DST_ONLY)
+ else if (rss_type & RTE_ETH_RSS_L4_DST_ONLY)
*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT));
else if (rss_type &
- (ETH_RSS_L3_SRC_ONLY |
- ETH_RSS_L3_DST_ONLY))
+ (RTE_ETH_RSS_L3_SRC_ONLY |
+ RTE_ETH_RSS_L3_DST_ONLY))
*hash_flds &= ~ICE_FLOW_HASH_SCTP_PORT;
} else {
*hash_flds &= ~ICE_FLOW_HASH_SCTP_PORT;
}
- if (rss_type & ETH_RSS_L4_CHKSUM)
+ if (rss_type & RTE_ETH_RSS_L4_CHKSUM)
*hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_CHKSUM);
}
if (*addl_hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
- if (!(rss_type & ETH_RSS_L2TPV3))
+ if (!(rss_type & RTE_ETH_RSS_L2TPV3))
*hash_flds &= ~ICE_FLOW_HASH_L2TPV3_SESS_ID;
}
if (*addl_hdrs & ICE_FLOW_SEG_HDR_ESP) {
- if (!(rss_type & ETH_RSS_ESP))
+ if (!(rss_type & RTE_ETH_RSS_ESP))
*hash_flds &= ~ICE_FLOW_HASH_ESP_SPI;
}
if (*addl_hdrs & ICE_FLOW_SEG_HDR_AH) {
- if (!(rss_type & ETH_RSS_AH))
+ if (!(rss_type & RTE_ETH_RSS_AH))
*hash_flds &= ~ICE_FLOW_HASH_AH_SPI;
}
if (*addl_hdrs & ICE_FLOW_SEG_HDR_PFCP_SESSION) {
- if (!(rss_type & ETH_RSS_PFCP))
+ if (!(rss_type & RTE_ETH_RSS_PFCP))
*hash_flds &= ~ICE_FLOW_HASH_PFCP_SEID;
}
}
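The UDP, TCP and SCTP branches above repeat one pattern: start from the full port-pair hash and clear whichever field the RTE_ETH_RSS_L4_*_ONLY attribute excludes, or drop the pair entirely when only L3 fields were requested. A condensed sketch of that pattern (the helper name is hypothetical; the bit arguments stand for the per-protocol ICE_FLOW_FIELD_IDX_* bits and ICE_FLOW_HASH_*_PORT masks):

#include <stdint.h>
#include <rte_ethdev.h>

static void
refine_l4(uint64_t *hash_flds, uint64_t rss_type,
	  uint64_t src_bit, uint64_t dst_bit, uint64_t port_pair_mask)
{
	if (rss_type & RTE_ETH_RSS_L4_SRC_ONLY)
		*hash_flds &= ~dst_bit;	/* keep only the source port */
	else if (rss_type & RTE_ETH_RSS_L4_DST_ONLY)
		*hash_flds &= ~src_bit;	/* keep only the destination port */
	else if (rss_type & (RTE_ETH_RSS_L3_SRC_ONLY |
			     RTE_ETH_RSS_L3_DST_ONLY))
		*hash_flds &= ~port_pair_mask;	/* L3-only request */
}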
@@ -870,7 +870,7 @@ ice_refine_hash_cfg_gtpu(struct ice_rss_hash_cfg *hash_cfg,
uint64_t *hash_flds = &hash_cfg->hash_flds;
/* update hash field for gtpu eh/gtpu dwn/gtpu up. */
- if (!(rss_type & ETH_RSS_GTPU))
+ if (!(rss_type & RTE_ETH_RSS_GTPU))
return;
if (*addl_hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN)
@@ -892,10 +892,10 @@ static void ice_refine_hash_cfg(struct ice_rss_hash_cfg *hash_cfg,
}
static uint64_t invalid_rss_comb[] = {
- ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_UDP,
- ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP,
- ETH_RSS_IPV6 | ETH_RSS_NONFRAG_IPV6_UDP,
- ETH_RSS_IPV6 | ETH_RSS_NONFRAG_IPV6_TCP,
+ RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_UDP,
+ RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_TCP,
+ RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_UDP,
+ RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_TCP,
RTE_ETH_RSS_L3_PRE40 |
RTE_ETH_RSS_L3_PRE56 |
RTE_ETH_RSS_L3_PRE96
@@ -907,9 +907,9 @@ struct rss_attr_type {
};
static struct rss_attr_type rss_attr_to_valid_type[] = {
- {ETH_RSS_L2_SRC_ONLY | ETH_RSS_L2_DST_ONLY, ETH_RSS_ETH},
- {ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY, VALID_RSS_L3},
- {ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY, VALID_RSS_L4},
+ {RTE_ETH_RSS_L2_SRC_ONLY | RTE_ETH_RSS_L2_DST_ONLY, RTE_ETH_RSS_ETH},
+ {RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY, VALID_RSS_L3},
+ {RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY, VALID_RSS_L4},
/* the current IPv6 prefix implementation supports only 64-bit prefixes */
{RTE_ETH_RSS_L3_PRE32, VALID_RSS_IPV6},
{RTE_ETH_RSS_L3_PRE48, VALID_RSS_IPV6},
@@ -928,16 +928,16 @@ ice_any_invalid_rss_type(enum rte_eth_hash_function rss_func,
* hash function.
*/
if (rss_func == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ) {
- if (rss_type & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY |
- ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY))
+ if (rss_type & (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY |
+ RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY))
return true;
if (!(rss_type &
- (ETH_RSS_IPV4 | ETH_RSS_IPV6 |
- ETH_RSS_FRAG_IPV4 | ETH_RSS_FRAG_IPV6 |
- ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP |
- ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV6_TCP |
- ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_NONFRAG_IPV6_SCTP)))
+ (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6 |
+ RTE_ETH_RSS_FRAG_IPV4 | RTE_ETH_RSS_FRAG_IPV6 |
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+ RTE_ETH_RSS_NONFRAG_IPV4_SCTP | RTE_ETH_RSS_NONFRAG_IPV6_SCTP)))
return true;
}
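The rss_attr_to_valid_type table above drives a containment rule: an attribute bit is only meaningful when at least one of its companion packet-type bits is also requested. A sketch of that validation loop (assuming the struct's two fields are named attr and type, which this hunk does not show):

#include <stdbool.h>
#include <stdint.h>
#include <rte_common.h>

static bool
rss_attr_valid(uint64_t rss_type)
{
	unsigned int i;

	for (i = 0; i < RTE_DIM(rss_attr_to_valid_type); i++) {
		const struct rss_attr_type *r = &rss_attr_to_valid_type[i];

		/* Attribute requested without any matching type bit. */
		if ((rss_type & r->attr) && !(rss_type & r->type))
			return false;
	}
	return true;
}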
@@ -303,7 +303,7 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
}
}
- if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
+ if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
/* Register mbuf field and flag for Rx timestamp */
err = rte_mbuf_dyn_rx_timestamp_register(
&ice_timestamp_dynfield_offset,
@@ -367,7 +367,7 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
QRXFLXP_CNTXT_RXDID_PRIO_M;
- if (ad->ptp_ena || rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP)
+ if (ad->ptp_ena || rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
regval |= QRXFLXP_CNTXT_TS_M;
ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
@@ -1117,7 +1117,7 @@ ice_rx_queue_setup(struct rte_eth_dev *dev,
rxq->reg_idx = vsi->base_queue + queue_idx;
rxq->port_id = dev->data->port_id;
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+ if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
rxq->crc_len = RTE_ETHER_CRC_LEN;
else
rxq->crc_len = 0;
@@ -1624,7 +1624,7 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
ice_rxd_to_vlan_tci(mb, &rxdp[j]);
rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
- if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
+ if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
ts_ns = ice_tstamp_convert_32b_64b(hw,
rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high));
if (ice_timestamp_dynflag > 0) {
@@ -1942,7 +1942,7 @@ ice_recv_scattered_pkts(void *rx_queue,
rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
- if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
+ if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
ts_ns = ice_tstamp_convert_32b_64b(hw,
rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
if (ice_timestamp_dynflag > 0) {
@@ -2373,7 +2373,7 @@ ice_recv_pkts(void *rx_queue,
rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
- if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
+ if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
ts_ns = ice_tstamp_convert_32b_64b(hw,
rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
if (ice_timestamp_dynflag > 0) {
@@ -2889,7 +2889,7 @@ ice_tx_free_bufs(struct ice_tx_queue *txq)
for (i = 0; i < txq->tx_rs_thresh; i++)
rte_prefetch0((txep + i)->mbuf);
- if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
+ if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) {
for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
rte_mempool_put(txep->mbuf->pool, txep->mbuf);
txep->mbuf = NULL;
@@ -3365,7 +3365,7 @@ ice_set_tx_function_flag(struct rte_eth_dev *dev, struct ice_tx_queue *txq)
/* Use a simple Tx queue if possible (only fast free is allowed) */
ad->tx_simple_allowed =
(txq->offloads ==
- (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
+ (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) &&
txq->tx_rs_thresh >= ICE_TX_MAX_BURST);
if (ad->tx_simple_allowed)
@@ -474,7 +474,7 @@ _ice_recv_raw_pkts_vec_avx2(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts,
* will cause a performance drop when getting into this context.
*/
if (rxq->vsi->adapter->pf.dev_data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_RSS_HASH) {
+ RTE_ETH_RX_OFFLOAD_RSS_HASH) {
/* load bottom half of every 32B desc */
const __m128i raw_desc_bh7 =
_mm_load_si128
@@ -585,7 +585,7 @@ _ice_recv_raw_pkts_vec_avx512(struct ice_rx_queue *rxq,
* will cause a performance drop when getting into this context.
*/
if (rxq->vsi->adapter->pf.dev_data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_RSS_HASH) {
+ RTE_ETH_RX_OFFLOAD_RSS_HASH) {
/* load bottom half of every 32B desc */
const __m128i raw_desc_bh7 =
_mm_load_si128
@@ -995,7 +995,7 @@ ice_tx_free_bufs_avx512(struct ice_tx_queue *txq)
txep = (void *)txq->sw_ring;
txep += txq->tx_next_dd - (n - 1);
- if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
+ if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
struct rte_mempool *mp = txep[0].mbuf->pool;
void **cache_objs;
struct rte_mempool_cache *cache = rte_mempool_default_cache(mp,
@@ -248,23 +248,23 @@ ice_rxq_vec_setup_default(struct ice_rx_queue *rxq)
}
#define ICE_TX_NO_VECTOR_FLAGS ( \
- DEV_TX_OFFLOAD_MULTI_SEGS | \
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
- DEV_TX_OFFLOAD_TCP_TSO)
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS | \
+ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_TCP_TSO)
#define ICE_TX_VECTOR_OFFLOAD ( \
- DEV_TX_OFFLOAD_VLAN_INSERT | \
- DEV_TX_OFFLOAD_QINQ_INSERT | \
- DEV_TX_OFFLOAD_IPV4_CKSUM | \
- DEV_TX_OFFLOAD_SCTP_CKSUM | \
- DEV_TX_OFFLOAD_UDP_CKSUM | \
- DEV_TX_OFFLOAD_TCP_CKSUM)
+ RTE_ETH_TX_OFFLOAD_VLAN_INSERT | \
+ RTE_ETH_TX_OFFLOAD_QINQ_INSERT | \
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
#define ICE_RX_VECTOR_OFFLOAD ( \
- DEV_RX_OFFLOAD_CHECKSUM | \
- DEV_RX_OFFLOAD_SCTP_CKSUM | \
- DEV_RX_OFFLOAD_VLAN | \
- DEV_RX_OFFLOAD_RSS_HASH)
+ RTE_ETH_RX_OFFLOAD_CHECKSUM | \
+ RTE_ETH_RX_OFFLOAD_SCTP_CKSUM | \
+ RTE_ETH_RX_OFFLOAD_VLAN | \
+ RTE_ETH_RX_OFFLOAD_RSS_HASH)
#define ICE_VECTOR_PATH 0
#define ICE_VECTOR_OFFLOAD_PATH 1
@@ -287,7 +287,7 @@ ice_rx_vec_queue_default(struct ice_rx_queue *rxq)
if (rxq->proto_xtr != PROTO_XTR_NONE)
return -1;
- if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP)
+ if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
return -1;
if (rxq->offloads & ICE_RX_VECTOR_OFFLOAD)
@@ -479,7 +479,7 @@ _ice_recv_raw_pkts_vec(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts,
* will cause a performance drop when getting into this context.
*/
if (rxq->vsi->adapter->pf.dev_data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_RSS_HASH) {
+ RTE_ETH_RX_OFFLOAD_RSS_HASH) {
/* load bottom half of every 32B desc */
const __m128i raw_desc_bh3 =
_mm_load_si128
@@ -307,8 +307,8 @@ igc_check_mq_mode(struct rte_eth_dev *dev)
return -EINVAL;
}
- if (rx_mq_mode != ETH_MQ_RX_NONE &&
- rx_mq_mode != ETH_MQ_RX_RSS) {
+ if (rx_mq_mode != RTE_ETH_MQ_RX_NONE &&
+ rx_mq_mode != RTE_ETH_MQ_RX_RSS) {
/* RSS together with VMDq is not supported */
PMD_INIT_LOG(ERR, "RX mode %d is not supported.",
rx_mq_mode);
@@ -318,7 +318,7 @@ igc_check_mq_mode(struct rte_eth_dev *dev)
/* To not break software that sets an invalid mode, only display a
* warning if an invalid mode is used.
*/
- if (tx_mq_mode != ETH_MQ_TX_NONE)
+ if (tx_mq_mode != RTE_ETH_MQ_TX_NONE)
PMD_INIT_LOG(WARNING,
"TX mode %d is not supported; it is meaningless in this driver, so it is ignored",
tx_mq_mode);
@@ -334,8 +334,8 @@ eth_igc_configure(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
- if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
- dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+ if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+ dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
ret = igc_check_mq_mode(dev);
if (ret != 0)
@@ -473,12 +473,12 @@ eth_igc_link_update(struct rte_eth_dev *dev, int wait_to_complete)
uint16_t duplex, speed;
hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
link.link_duplex = (duplex == FULL_DUPLEX) ?
- ETH_LINK_FULL_DUPLEX :
- ETH_LINK_HALF_DUPLEX;
+ RTE_ETH_LINK_FULL_DUPLEX :
+ RTE_ETH_LINK_HALF_DUPLEX;
link.link_speed = speed;
- link.link_status = ETH_LINK_UP;
+ link.link_status = RTE_ETH_LINK_UP;
link.link_autoneg = !(dev->data->dev_conf.link_speeds &
- ETH_LINK_SPEED_FIXED);
+ RTE_ETH_LINK_SPEED_FIXED);
if (speed == SPEED_2500) {
uint32_t tipg = IGC_READ_REG(hw, IGC_TIPG);
@@ -490,9 +490,9 @@ eth_igc_link_update(struct rte_eth_dev *dev, int wait_to_complete)
}
} else {
link.link_speed = 0;
- link.link_duplex = ETH_LINK_HALF_DUPLEX;
- link.link_status = ETH_LINK_DOWN;
- link.link_autoneg = ETH_LINK_FIXED;
+ link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+ link.link_status = RTE_ETH_LINK_DOWN;
+ link.link_autoneg = RTE_ETH_LINK_FIXED;
}
return rte_eth_linkstatus_set(dev, &link);
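rte_eth_linkstatus_set() publishes the struct atomically; an application reads it back with rte_eth_link_get_nowait() and compares against the renamed constants. A minimal caller-side sketch:

#include <stdio.h>
#include <rte_ethdev.h>

static void
print_link(uint16_t port_id)
{
	struct rte_eth_link link;

	if (rte_eth_link_get_nowait(port_id, &link) != 0)
		return;
	printf("port %u: %s, %u Mbps, %s-duplex\n", port_id,
	       link.link_status == RTE_ETH_LINK_UP ? "up" : "down",
	       link.link_speed,
	       link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
	       "full" : "half");
}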
@@ -525,7 +525,7 @@ eth_igc_interrupt_action(struct rte_eth_dev *dev)
" Port %d: Link Up - speed %u Mbps - %s",
dev->data->port_id,
(unsigned int)link.link_speed,
- link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+ link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
"full-duplex" : "half-duplex");
else
PMD_DRV_LOG(INFO, " Port %d: Link Down",
@@ -972,18 +972,18 @@ eth_igc_start(struct rte_eth_dev *dev)
/* VLAN Offload Settings */
eth_igc_vlan_offload_set(dev,
- ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
- ETH_VLAN_EXTEND_MASK);
+ RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+ RTE_ETH_VLAN_EXTEND_MASK);
/* Setup link speed and duplex */
speeds = &dev->data->dev_conf.link_speeds;
- if (*speeds == ETH_LINK_SPEED_AUTONEG) {
+ if (*speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
hw->phy.autoneg_advertised = IGC_ALL_SPEED_DUPLEX_2500;
hw->mac.autoneg = 1;
} else {
int num_speeds = 0;
- if (*speeds & ETH_LINK_SPEED_FIXED) {
+ if (*speeds & RTE_ETH_LINK_SPEED_FIXED) {
PMD_DRV_LOG(ERR,
"Forced speed mode is currently not supported");
igc_dev_clear_queues(dev);
@@ -993,33 +993,33 @@ eth_igc_start(struct rte_eth_dev *dev)
hw->phy.autoneg_advertised = 0;
hw->mac.autoneg = 1;
- if (*speeds & ~(ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
- ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
- ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G)) {
+ if (*speeds & ~(RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M |
+ RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M |
+ RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_2_5G)) {
num_speeds = -1;
goto error_invalid_config;
}
- if (*speeds & ETH_LINK_SPEED_10M_HD) {
+ if (*speeds & RTE_ETH_LINK_SPEED_10M_HD) {
hw->phy.autoneg_advertised |= ADVERTISE_10_HALF;
num_speeds++;
}
- if (*speeds & ETH_LINK_SPEED_10M) {
+ if (*speeds & RTE_ETH_LINK_SPEED_10M) {
hw->phy.autoneg_advertised |= ADVERTISE_10_FULL;
num_speeds++;
}
- if (*speeds & ETH_LINK_SPEED_100M_HD) {
+ if (*speeds & RTE_ETH_LINK_SPEED_100M_HD) {
hw->phy.autoneg_advertised |= ADVERTISE_100_HALF;
num_speeds++;
}
- if (*speeds & ETH_LINK_SPEED_100M) {
+ if (*speeds & RTE_ETH_LINK_SPEED_100M) {
hw->phy.autoneg_advertised |= ADVERTISE_100_FULL;
num_speeds++;
}
- if (*speeds & ETH_LINK_SPEED_1G) {
+ if (*speeds & RTE_ETH_LINK_SPEED_1G) {
hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL;
num_speeds++;
}
- if (*speeds & ETH_LINK_SPEED_2_5G) {
+ if (*speeds & RTE_ETH_LINK_SPEED_2_5G) {
hw->phy.autoneg_advertised |= ADVERTISE_2500_FULL;
num_speeds++;
}
@@ -1482,14 +1482,14 @@ eth_igc_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->max_mac_addrs = hw->mac.rar_entry_count;
dev_info->rx_offload_capa = IGC_RX_OFFLOAD_ALL;
dev_info->tx_offload_capa = IGC_TX_OFFLOAD_ALL;
- dev_info->rx_queue_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
+ dev_info->rx_queue_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
dev_info->max_rx_queues = IGC_QUEUE_PAIRS_NUM;
dev_info->max_tx_queues = IGC_QUEUE_PAIRS_NUM;
dev_info->max_vmdq_pools = 0;
dev_info->hash_key_size = IGC_HKEY_MAX_INDEX * sizeof(uint32_t);
- dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
+ dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
dev_info->flow_type_rss_offloads = IGC_RSS_OFFLOAD_ALL;
dev_info->default_rxconf = (struct rte_eth_rxconf) {
@@ -1515,9 +1515,9 @@ eth_igc_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->rx_desc_lim = rx_desc_lim;
dev_info->tx_desc_lim = tx_desc_lim;
- dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
- ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
- ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G;
+ dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M |
+ RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M |
+ RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_2_5G;
dev_info->max_mtu = dev_info->max_rx_pktlen - IGC_ETH_OVERHEAD;
dev_info->min_mtu = RTE_ETHER_MIN_MTU;
@@ -2141,13 +2141,13 @@ eth_igc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
rx_pause = 0;
if (rx_pause && tx_pause)
- fc_conf->mode = RTE_FC_FULL;
+ fc_conf->mode = RTE_ETH_FC_FULL;
else if (rx_pause)
- fc_conf->mode = RTE_FC_RX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
else if (tx_pause)
- fc_conf->mode = RTE_FC_TX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
else
- fc_conf->mode = RTE_FC_NONE;
+ fc_conf->mode = RTE_ETH_FC_NONE;
return 0;
}
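The rx_pause/tx_pause pair decodes into the four RTE_ETH_FC_* modes. Since the enum is ordered NONE, RX_PAUSE, TX_PAUSE, FULL, the same decode can be written branch-free (a sketch that relies on that ordering; the if/else chain above remains the clearer form for a driver):

#include <rte_ethdev.h>

/* bit 0 = Rx pause enabled, bit 1 = Tx pause enabled. */
static enum rte_eth_fc_mode
fc_mode_decode(int rx_pause, int tx_pause)
{
	return (enum rte_eth_fc_mode)((!!rx_pause) | ((!!tx_pause) << 1));
}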
@@ -2179,16 +2179,16 @@ eth_igc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
}
switch (fc_conf->mode) {
- case RTE_FC_NONE:
+ case RTE_ETH_FC_NONE:
hw->fc.requested_mode = igc_fc_none;
break;
- case RTE_FC_RX_PAUSE:
+ case RTE_ETH_FC_RX_PAUSE:
hw->fc.requested_mode = igc_fc_rx_pause;
break;
- case RTE_FC_TX_PAUSE:
+ case RTE_ETH_FC_TX_PAUSE:
hw->fc.requested_mode = igc_fc_tx_pause;
break;
- case RTE_FC_FULL:
+ case RTE_ETH_FC_FULL:
hw->fc.requested_mode = igc_fc_full;
break;
default:
@@ -2234,29 +2234,29 @@ eth_igc_rss_reta_update(struct rte_eth_dev *dev,
struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
uint16_t i;
- if (reta_size != ETH_RSS_RETA_SIZE_128) {
+ if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
PMD_DRV_LOG(ERR,
"The size of the configured RSS redirection table (%d) doesn't match the number supported by hardware (%d)",
- reta_size, ETH_RSS_RETA_SIZE_128);
+ reta_size, RTE_ETH_RSS_RETA_SIZE_128);
return -EINVAL;
}
- RTE_BUILD_BUG_ON(ETH_RSS_RETA_SIZE_128 % IGC_RSS_RDT_REG_SIZE);
+ RTE_BUILD_BUG_ON(RTE_ETH_RSS_RETA_SIZE_128 % IGC_RSS_RDT_REG_SIZE);
/* set redirection table */
- for (i = 0; i < ETH_RSS_RETA_SIZE_128; i += IGC_RSS_RDT_REG_SIZE) {
+ for (i = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i += IGC_RSS_RDT_REG_SIZE) {
union igc_rss_reta_reg reta, reg;
uint16_t idx, shift;
uint8_t j, mask;
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
mask = (uint8_t)((reta_conf[idx].mask >> shift) &
IGC_RSS_RDT_REG_SIZE_MASK);
/* if no need to update the register */
if (!mask ||
- shift > (RTE_RETA_GROUP_SIZE - IGC_RSS_RDT_REG_SIZE))
+ shift > (RTE_ETH_RETA_GROUP_SIZE - IGC_RSS_RDT_REG_SIZE))
continue;
/* check the mask to see whether the register value must be read first */
@@ -2290,29 +2290,29 @@ eth_igc_rss_reta_query(struct rte_eth_dev *dev,
struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
uint16_t i;
- if (reta_size != ETH_RSS_RETA_SIZE_128) {
+ if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
PMD_DRV_LOG(ERR,
"The size of the configured RSS redirection table (%d) doesn't match the number supported by hardware (%d)",
- reta_size, ETH_RSS_RETA_SIZE_128);
+ reta_size, RTE_ETH_RSS_RETA_SIZE_128);
return -EINVAL;
}
- RTE_BUILD_BUG_ON(ETH_RSS_RETA_SIZE_128 % IGC_RSS_RDT_REG_SIZE);
+ RTE_BUILD_BUG_ON(RTE_ETH_RSS_RETA_SIZE_128 % IGC_RSS_RDT_REG_SIZE);
/* read redirection table */
- for (i = 0; i < ETH_RSS_RETA_SIZE_128; i += IGC_RSS_RDT_REG_SIZE) {
+ for (i = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i += IGC_RSS_RDT_REG_SIZE) {
union igc_rss_reta_reg reta;
uint16_t idx, shift;
uint8_t j, mask;
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
mask = (uint8_t)((reta_conf[idx].mask >> shift) &
IGC_RSS_RDT_REG_SIZE_MASK);
/* if no need to read register */
if (!mask ||
- shift > (RTE_RETA_GROUP_SIZE - IGC_RSS_RDT_REG_SIZE))
+ shift > (RTE_ETH_RETA_GROUP_SIZE - IGC_RSS_RDT_REG_SIZE))
continue;
/* read register and get the queue index */
@@ -2369,23 +2369,23 @@ eth_igc_rss_hash_conf_get(struct rte_eth_dev *dev,
rss_hf = 0;
if (mrqc & IGC_MRQC_RSS_FIELD_IPV4)
- rss_hf |= ETH_RSS_IPV4;
+ rss_hf |= RTE_ETH_RSS_IPV4;
if (mrqc & IGC_MRQC_RSS_FIELD_IPV4_TCP)
- rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+ rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
if (mrqc & IGC_MRQC_RSS_FIELD_IPV6)
- rss_hf |= ETH_RSS_IPV6;
+ rss_hf |= RTE_ETH_RSS_IPV6;
if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_EX)
- rss_hf |= ETH_RSS_IPV6_EX;
+ rss_hf |= RTE_ETH_RSS_IPV6_EX;
if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_TCP)
- rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+ rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_TCP_EX)
- rss_hf |= ETH_RSS_IPV6_TCP_EX;
+ rss_hf |= RTE_ETH_RSS_IPV6_TCP_EX;
if (mrqc & IGC_MRQC_RSS_FIELD_IPV4_UDP)
- rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+ rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_UDP)
- rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+ rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_UDP_EX)
- rss_hf |= ETH_RSS_IPV6_UDP_EX;
+ rss_hf |= RTE_ETH_RSS_IPV6_UDP_EX;
rss_conf->rss_hf |= rss_hf;
return 0;
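Each IGC_MRQC_RSS_FIELD_* bit corresponds to exactly one RTE_ETH_RSS_* flag, so the if chain above can also be a pair-table walk (a sketch using the same macro names; the IPV6_EX variants are omitted for brevity):

#include <stdint.h>
#include <rte_common.h>
#include <rte_ethdev.h>

struct mrqc_map {
	uint32_t mrqc_bit;
	uint64_t rss_flag;
};

static const struct mrqc_map mrqc_to_rss[] = {
	{ IGC_MRQC_RSS_FIELD_IPV4,     RTE_ETH_RSS_IPV4 },
	{ IGC_MRQC_RSS_FIELD_IPV4_TCP, RTE_ETH_RSS_NONFRAG_IPV4_TCP },
	{ IGC_MRQC_RSS_FIELD_IPV4_UDP, RTE_ETH_RSS_NONFRAG_IPV4_UDP },
	{ IGC_MRQC_RSS_FIELD_IPV6,     RTE_ETH_RSS_IPV6 },
	{ IGC_MRQC_RSS_FIELD_IPV6_TCP, RTE_ETH_RSS_NONFRAG_IPV6_TCP },
	{ IGC_MRQC_RSS_FIELD_IPV6_UDP, RTE_ETH_RSS_NONFRAG_IPV6_UDP },
};

static uint64_t
mrqc_to_rss_hf(uint32_t mrqc)
{
	uint64_t rss_hf = 0;
	unsigned int i;

	for (i = 0; i < RTE_DIM(mrqc_to_rss); i++)
		if (mrqc & mrqc_to_rss[i].mrqc_bit)
			rss_hf |= mrqc_to_rss[i].rss_flag;
	return rss_hf;
}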
@@ -2514,22 +2514,22 @@ eth_igc_vlan_offload_set(struct rte_eth_dev *dev, int mask)
struct rte_eth_rxmode *rxmode;
rxmode = &dev->data->dev_conf.rxmode;
- if (mask & ETH_VLAN_STRIP_MASK) {
- if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
igc_vlan_hw_strip_enable(dev);
else
igc_vlan_hw_strip_disable(dev);
}
- if (mask & ETH_VLAN_FILTER_MASK) {
- if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+ if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
igc_vlan_hw_filter_enable(dev);
else
igc_vlan_hw_filter_disable(dev);
}
- if (mask & ETH_VLAN_EXTEND_MASK) {
- if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+ if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
return igc_vlan_hw_extend_enable(dev);
else
return igc_vlan_hw_extend_disable(dev);
@@ -2547,7 +2547,7 @@ eth_igc_vlan_tpid_set(struct rte_eth_dev *dev,
uint32_t reg_val;
/* only the outer TPID of a double VLAN can be configured */
- if (vlan_type == ETH_VLAN_TYPE_OUTER) {
+ if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER) {
reg_val = IGC_READ_REG(hw, IGC_VET);
reg_val = (reg_val & (~IGC_VET_EXT)) |
((uint32_t)tpid << IGC_VET_EXT_SHIFT);
@@ -66,37 +66,37 @@ extern "C" {
#define IGC_TX_MAX_MTU_SEG UINT8_MAX
#define IGC_RX_OFFLOAD_ALL ( \
- DEV_RX_OFFLOAD_VLAN_STRIP | \
- DEV_RX_OFFLOAD_VLAN_FILTER | \
- DEV_RX_OFFLOAD_VLAN_EXTEND | \
- DEV_RX_OFFLOAD_IPV4_CKSUM | \
- DEV_RX_OFFLOAD_UDP_CKSUM | \
- DEV_RX_OFFLOAD_TCP_CKSUM | \
- DEV_RX_OFFLOAD_SCTP_CKSUM | \
- DEV_RX_OFFLOAD_KEEP_CRC | \
- DEV_RX_OFFLOAD_SCATTER | \
- DEV_RX_OFFLOAD_RSS_HASH)
+ RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER | \
+ RTE_ETH_RX_OFFLOAD_VLAN_EXTEND | \
+ RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | \
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM | \
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM | \
+ RTE_ETH_RX_OFFLOAD_SCTP_CKSUM | \
+ RTE_ETH_RX_OFFLOAD_KEEP_CRC | \
+ RTE_ETH_RX_OFFLOAD_SCATTER | \
+ RTE_ETH_RX_OFFLOAD_RSS_HASH)
#define IGC_TX_OFFLOAD_ALL ( \
- DEV_TX_OFFLOAD_VLAN_INSERT | \
- DEV_TX_OFFLOAD_IPV4_CKSUM | \
- DEV_TX_OFFLOAD_UDP_CKSUM | \
- DEV_TX_OFFLOAD_TCP_CKSUM | \
- DEV_TX_OFFLOAD_SCTP_CKSUM | \
- DEV_TX_OFFLOAD_TCP_TSO | \
- DEV_TX_OFFLOAD_UDP_TSO | \
- DEV_TX_OFFLOAD_MULTI_SEGS)
+ RTE_ETH_TX_OFFLOAD_VLAN_INSERT | \
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_TCP_TSO | \
+ RTE_ETH_TX_OFFLOAD_UDP_TSO | \
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
#define IGC_RSS_OFFLOAD_ALL ( \
- ETH_RSS_IPV4 | \
- ETH_RSS_NONFRAG_IPV4_TCP | \
- ETH_RSS_NONFRAG_IPV4_UDP | \
- ETH_RSS_IPV6 | \
- ETH_RSS_NONFRAG_IPV6_TCP | \
- ETH_RSS_NONFRAG_IPV6_UDP | \
- ETH_RSS_IPV6_EX | \
- ETH_RSS_IPV6_TCP_EX | \
- ETH_RSS_IPV6_UDP_EX)
+ RTE_ETH_RSS_IPV4 | \
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+ RTE_ETH_RSS_IPV6 | \
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+ RTE_ETH_RSS_IPV6_EX | \
+ RTE_ETH_RSS_IPV6_TCP_EX | \
+ RTE_ETH_RSS_IPV6_UDP_EX)
#define IGC_MAX_ETQF_FILTERS 3 /* etqf(3) is used for 1588 */
#define IGC_ETQF_FILTER_1588 3
@@ -127,7 +127,7 @@ struct igc_rx_queue {
uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */
uint32_t flags; /**< RX flags. */
- uint64_t offloads; /**< offloads of DEV_RX_OFFLOAD_* */
+ uint64_t offloads; /**< offloads of RTE_ETH_RX_OFFLOAD_* */
};
/** Offload features */
@@ -209,7 +209,7 @@ struct igc_tx_queue {
/**< Start context position for transmit queue. */
struct igc_advctx_info ctx_cache[IGC_CTX_NUM];
/**< Hardware context history.*/
- uint64_t offloads; /**< offloads of DEV_TX_OFFLOAD_* */
+ uint64_t offloads; /**< offloads of RTE_ETH_TX_OFFLOAD_* */
};
static inline uint64_t
@@ -847,23 +847,23 @@ igc_hw_rss_hash_set(struct igc_hw *hw, struct rte_eth_rss_conf *rss_conf)
/* Set configured hashing protocols in MRQC register */
rss_hf = rss_conf->rss_hf;
mrqc = IGC_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
- if (rss_hf & ETH_RSS_IPV4)
+ if (rss_hf & RTE_ETH_RSS_IPV4)
mrqc |= IGC_MRQC_RSS_FIELD_IPV4;
- if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
mrqc |= IGC_MRQC_RSS_FIELD_IPV4_TCP;
- if (rss_hf & ETH_RSS_IPV6)
+ if (rss_hf & RTE_ETH_RSS_IPV6)
mrqc |= IGC_MRQC_RSS_FIELD_IPV6;
- if (rss_hf & ETH_RSS_IPV6_EX)
+ if (rss_hf & RTE_ETH_RSS_IPV6_EX)
mrqc |= IGC_MRQC_RSS_FIELD_IPV6_EX;
- if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
mrqc |= IGC_MRQC_RSS_FIELD_IPV6_TCP;
- if (rss_hf & ETH_RSS_IPV6_TCP_EX)
+ if (rss_hf & RTE_ETH_RSS_IPV6_TCP_EX)
mrqc |= IGC_MRQC_RSS_FIELD_IPV6_TCP_EX;
- if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP;
- if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP;
- if (rss_hf & ETH_RSS_IPV6_UDP_EX)
+ if (rss_hf & RTE_ETH_RSS_IPV6_UDP_EX)
mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP_EX;
IGC_WRITE_REG(hw, IGC_MRQC, mrqc);
}
@@ -1037,10 +1037,10 @@ igc_dev_mq_rx_configure(struct rte_eth_dev *dev)
}
switch (dev->data->dev_conf.rxmode.mq_mode) {
- case ETH_MQ_RX_RSS:
+ case RTE_ETH_MQ_RX_RSS:
igc_rss_configure(dev);
break;
- case ETH_MQ_RX_NONE:
+ case RTE_ETH_MQ_RX_NONE:
/*
* configure the RSS registers as follows,
* then disable the RSS logic
@@ -1111,7 +1111,7 @@ igc_rx_init(struct rte_eth_dev *dev)
* Reset crc_len in case it was changed after queue setup by a
* call to configure
*/
- rxq->crc_len = (offloads & DEV_RX_OFFLOAD_KEEP_CRC) ?
+ rxq->crc_len = (offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) ?
RTE_ETHER_CRC_LEN : 0;
bus_addr = rxq->rx_ring_phys_addr;
@@ -1177,7 +1177,7 @@ igc_rx_init(struct rte_eth_dev *dev)
IGC_WRITE_REG(hw, IGC_RXDCTL(rxq->reg_idx), rxdctl);
}
- if (offloads & DEV_RX_OFFLOAD_SCATTER)
+ if (offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
dev->data->scattered_rx = 1;
if (dev->data->scattered_rx) {
@@ -1221,20 +1221,20 @@ igc_rx_init(struct rte_eth_dev *dev)
rxcsum |= IGC_RXCSUM_PCSD;
/* Enable both L3/L4 rx checksum offload */
- if (offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
+ if (offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM)
rxcsum |= IGC_RXCSUM_IPOFL;
else
rxcsum &= ~IGC_RXCSUM_IPOFL;
if (offloads &
- (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM)) {
+ (RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM)) {
rxcsum |= IGC_RXCSUM_TUOFL;
- offloads |= DEV_RX_OFFLOAD_SCTP_CKSUM;
+ offloads |= RTE_ETH_RX_OFFLOAD_SCTP_CKSUM;
} else {
rxcsum &= ~IGC_RXCSUM_TUOFL;
}
- if (offloads & DEV_RX_OFFLOAD_SCTP_CKSUM)
+ if (offloads & RTE_ETH_RX_OFFLOAD_SCTP_CKSUM)
rxcsum |= IGC_RXCSUM_CRCOFL;
else
rxcsum &= ~IGC_RXCSUM_CRCOFL;
@@ -1242,7 +1242,7 @@ igc_rx_init(struct rte_eth_dev *dev)
IGC_WRITE_REG(hw, IGC_RXCSUM, rxcsum);
/* Setup the Receive Control Register. */
- if (offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+ if (offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
rctl &= ~IGC_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
else
rctl |= IGC_RCTL_SECRC; /* Strip Ethernet CRC. */
@@ -1279,12 +1279,12 @@ igc_rx_init(struct rte_eth_dev *dev)
IGC_WRITE_REG(hw, IGC_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
dvmolr = IGC_READ_REG(hw, IGC_DVMOLR(rxq->reg_idx));
- if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
dvmolr |= IGC_DVMOLR_STRVLAN;
else
dvmolr &= ~IGC_DVMOLR_STRVLAN;
- if (offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+ if (offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
dvmolr &= ~IGC_DVMOLR_STRCRC;
else
dvmolr |= IGC_DVMOLR_STRCRC;
@@ -2253,10 +2253,10 @@ eth_igc_vlan_strip_queue_set(struct rte_eth_dev *dev,
reg_val = IGC_READ_REG(hw, IGC_DVMOLR(rx_queue_id));
if (on) {
reg_val |= IGC_DVMOLR_STRVLAN;
- rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+ rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
} else {
reg_val &= ~(IGC_DVMOLR_STRVLAN | IGC_DVMOLR_HIDVLAN);
- rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+ rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
}
IGC_WRITE_REG(hw, IGC_DVMOLR(rx_queue_id), reg_val);
@@ -280,37 +280,37 @@ ionic_dev_link_update(struct rte_eth_dev *eth_dev,
memset(&link, 0, sizeof(link));
if (adapter->idev.port_info->config.an_enable) {
- link.link_autoneg = ETH_LINK_AUTONEG;
+ link.link_autoneg = RTE_ETH_LINK_AUTONEG;
}
if (!adapter->link_up ||
!(lif->state & IONIC_LIF_F_UP)) {
/* Interface is down */
- link.link_status = ETH_LINK_DOWN;
- link.link_duplex = ETH_LINK_HALF_DUPLEX;
- link.link_speed = ETH_SPEED_NUM_NONE;
+ link.link_status = RTE_ETH_LINK_DOWN;
+ link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+ link.link_speed = RTE_ETH_SPEED_NUM_NONE;
} else {
/* Interface is up */
- link.link_status = ETH_LINK_UP;
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_status = RTE_ETH_LINK_UP;
+ link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
switch (adapter->link_speed) {
case 10000:
- link.link_speed = ETH_SPEED_NUM_10G;
+ link.link_speed = RTE_ETH_SPEED_NUM_10G;
break;
case 25000:
- link.link_speed = ETH_SPEED_NUM_25G;
+ link.link_speed = RTE_ETH_SPEED_NUM_25G;
break;
case 40000:
- link.link_speed = ETH_SPEED_NUM_40G;
+ link.link_speed = RTE_ETH_SPEED_NUM_40G;
break;
case 50000:
- link.link_speed = ETH_SPEED_NUM_50G;
+ link.link_speed = RTE_ETH_SPEED_NUM_50G;
break;
case 100000:
- link.link_speed = ETH_SPEED_NUM_100G;
+ link.link_speed = RTE_ETH_SPEED_NUM_100G;
break;
default:
- link.link_speed = ETH_SPEED_NUM_NONE;
+ link.link_speed = RTE_ETH_SPEED_NUM_NONE;
break;
}
}
@@ -387,17 +387,17 @@ ionic_dev_info_get(struct rte_eth_dev *eth_dev,
dev_info->flow_type_rss_offloads = IONIC_ETH_RSS_OFFLOAD_ALL;
dev_info->speed_capa =
- ETH_LINK_SPEED_10G |
- ETH_LINK_SPEED_25G |
- ETH_LINK_SPEED_40G |
- ETH_LINK_SPEED_50G |
- ETH_LINK_SPEED_100G;
+ RTE_ETH_LINK_SPEED_10G |
+ RTE_ETH_LINK_SPEED_25G |
+ RTE_ETH_LINK_SPEED_40G |
+ RTE_ETH_LINK_SPEED_50G |
+ RTE_ETH_LINK_SPEED_100G;
/*
* Per-queue capabilities
* RTE does not support disabling a feature on a queue if it is
* enabled globally on the device. Thus the driver does not advertise
- * capabilities like DEV_TX_OFFLOAD_IPV4_CKSUM as per-queue even
+ * capabilities like RTE_ETH_TX_OFFLOAD_IPV4_CKSUM as per-queue even
* though the driver would be otherwise capable of disabling it on
* a per-queue basis.
*/
@@ -411,24 +411,24 @@ ionic_dev_info_get(struct rte_eth_dev *eth_dev,
*/
dev_info->rx_offload_capa = dev_info->rx_queue_offload_capa |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_VLAN_FILTER |
- DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_SCATTER |
- DEV_RX_OFFLOAD_RSS_HASH |
+ RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+ RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+ RTE_ETH_RX_OFFLOAD_SCATTER |
+ RTE_ETH_RX_OFFLOAD_RSS_HASH |
0;
dev_info->tx_offload_capa = dev_info->tx_queue_offload_capa |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_TX_OFFLOAD_OUTER_UDP_CKSUM |
- DEV_TX_OFFLOAD_MULTI_SEGS |
- DEV_TX_OFFLOAD_TCP_TSO |
- DEV_TX_OFFLOAD_VLAN_INSERT |
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+ RTE_ETH_TX_OFFLOAD_TCP_TSO |
+ RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
0;
dev_info->rx_desc_lim = rx_desc_lim;
@@ -463,9 +463,9 @@ ionic_flow_ctrl_get(struct rte_eth_dev *eth_dev,
fc_conf->autoneg = 0;
if (idev->port_info->config.pause_type)
- fc_conf->mode = RTE_FC_FULL;
+ fc_conf->mode = RTE_ETH_FC_FULL;
else
- fc_conf->mode = RTE_FC_NONE;
+ fc_conf->mode = RTE_ETH_FC_NONE;
}
return 0;
@@ -487,14 +487,14 @@ ionic_flow_ctrl_set(struct rte_eth_dev *eth_dev,
}
switch (fc_conf->mode) {
- case RTE_FC_NONE:
+ case RTE_ETH_FC_NONE:
pause_type = IONIC_PORT_PAUSE_TYPE_NONE;
break;
- case RTE_FC_FULL:
+ case RTE_ETH_FC_FULL:
pause_type = IONIC_PORT_PAUSE_TYPE_LINK;
break;
- case RTE_FC_RX_PAUSE:
- case RTE_FC_TX_PAUSE:
+ case RTE_ETH_FC_RX_PAUSE:
+ case RTE_ETH_FC_TX_PAUSE:
return -ENOTSUP;
}
@@ -545,12 +545,12 @@ ionic_dev_rss_reta_update(struct rte_eth_dev *eth_dev,
return -EINVAL;
}
- num = tbl_sz / RTE_RETA_GROUP_SIZE;
+ num = tbl_sz / RTE_ETH_RETA_GROUP_SIZE;
for (i = 0; i < num; i++) {
- for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) {
+ for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++) {
if (reta_conf[i].mask & ((uint64_t)1 << j)) {
- index = (i * RTE_RETA_GROUP_SIZE) + j;
+ index = (i * RTE_ETH_RETA_GROUP_SIZE) + j;
lif->rss_ind_tbl[index] = reta_conf[i].reta[j];
}
}
@@ -585,12 +585,12 @@ ionic_dev_rss_reta_query(struct rte_eth_dev *eth_dev,
return -EINVAL;
}
- num = reta_size / RTE_RETA_GROUP_SIZE;
+ num = reta_size / RTE_ETH_RETA_GROUP_SIZE;
for (i = 0; i < num; i++) {
memcpy(reta_conf->reta,
- &lif->rss_ind_tbl[i * RTE_RETA_GROUP_SIZE],
- RTE_RETA_GROUP_SIZE);
+ &lif->rss_ind_tbl[i * RTE_ETH_RETA_GROUP_SIZE],
+ RTE_ETH_RETA_GROUP_SIZE);
reta_conf++;
}
@@ -618,17 +618,17 @@ ionic_dev_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
IONIC_RSS_HASH_KEY_SIZE);
if (lif->rss_types & IONIC_RSS_TYPE_IPV4)
- rss_hf |= ETH_RSS_IPV4;
+ rss_hf |= RTE_ETH_RSS_IPV4;
if (lif->rss_types & IONIC_RSS_TYPE_IPV4_TCP)
- rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+ rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
if (lif->rss_types & IONIC_RSS_TYPE_IPV4_UDP)
- rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+ rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
if (lif->rss_types & IONIC_RSS_TYPE_IPV6)
- rss_hf |= ETH_RSS_IPV6;
+ rss_hf |= RTE_ETH_RSS_IPV6;
if (lif->rss_types & IONIC_RSS_TYPE_IPV6_TCP)
- rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+ rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
if (lif->rss_types & IONIC_RSS_TYPE_IPV6_UDP)
- rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+ rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
rss_conf->rss_hf = rss_hf;
@@ -660,17 +660,17 @@ ionic_dev_rss_hash_update(struct rte_eth_dev *eth_dev,
if (!lif->rss_ind_tbl)
return -EINVAL;
- if (rss_conf->rss_hf & ETH_RSS_IPV4)
+ if (rss_conf->rss_hf & RTE_ETH_RSS_IPV4)
rss_types |= IONIC_RSS_TYPE_IPV4;
- if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+ if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
rss_types |= IONIC_RSS_TYPE_IPV4_TCP;
- if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+ if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
rss_types |= IONIC_RSS_TYPE_IPV4_UDP;
- if (rss_conf->rss_hf & ETH_RSS_IPV6)
+ if (rss_conf->rss_hf & RTE_ETH_RSS_IPV6)
rss_types |= IONIC_RSS_TYPE_IPV6;
- if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+ if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
rss_types |= IONIC_RSS_TYPE_IPV6_TCP;
- if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
+ if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
rss_types |= IONIC_RSS_TYPE_IPV6_UDP;
ionic_lif_rss_config(lif, rss_types, key, NULL);
@@ -842,15 +842,15 @@ ionic_dev_configure(struct rte_eth_dev *eth_dev)
static inline uint32_t
ionic_parse_link_speeds(uint16_t link_speeds)
{
- if (link_speeds & ETH_LINK_SPEED_100G)
+ if (link_speeds & RTE_ETH_LINK_SPEED_100G)
return 100000;
- else if (link_speeds & ETH_LINK_SPEED_50G)
+ else if (link_speeds & RTE_ETH_LINK_SPEED_50G)
return 50000;
- else if (link_speeds & ETH_LINK_SPEED_40G)
+ else if (link_speeds & RTE_ETH_LINK_SPEED_40G)
return 40000;
- else if (link_speeds & ETH_LINK_SPEED_25G)
+ else if (link_speeds & RTE_ETH_LINK_SPEED_25G)
return 25000;
- else if (link_speeds & ETH_LINK_SPEED_10G)
+ else if (link_speeds & RTE_ETH_LINK_SPEED_10G)
return 10000;
else
return 0;
@@ -874,12 +874,12 @@ ionic_dev_start(struct rte_eth_dev *eth_dev)
IONIC_PRINT_CALL();
allowed_speeds =
- ETH_LINK_SPEED_FIXED |
- ETH_LINK_SPEED_10G |
- ETH_LINK_SPEED_25G |
- ETH_LINK_SPEED_40G |
- ETH_LINK_SPEED_50G |
- ETH_LINK_SPEED_100G;
+ RTE_ETH_LINK_SPEED_FIXED |
+ RTE_ETH_LINK_SPEED_10G |
+ RTE_ETH_LINK_SPEED_25G |
+ RTE_ETH_LINK_SPEED_40G |
+ RTE_ETH_LINK_SPEED_50G |
+ RTE_ETH_LINK_SPEED_100G;
if (dev_conf->link_speeds & ~allowed_speeds) {
IONIC_PRINT(ERR, "Invalid link setting");
@@ -896,7 +896,7 @@ ionic_dev_start(struct rte_eth_dev *eth_dev)
}
/* Configure link */
- an_enable = (dev_conf->link_speeds & ETH_LINK_SPEED_FIXED) == 0;
+ an_enable = (dev_conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) == 0;
ionic_dev_cmd_port_autoneg(idev, an_enable);
err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT);
@@ -8,12 +8,12 @@
#include <rte_ethdev.h>
#define IONIC_ETH_RSS_OFFLOAD_ALL ( \
- ETH_RSS_IPV4 | \
- ETH_RSS_NONFRAG_IPV4_TCP | \
- ETH_RSS_NONFRAG_IPV4_UDP | \
- ETH_RSS_IPV6 | \
- ETH_RSS_NONFRAG_IPV6_TCP | \
- ETH_RSS_NONFRAG_IPV6_UDP)
+ RTE_ETH_RSS_IPV4 | \
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+ RTE_ETH_RSS_IPV6 | \
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP)
#define IONIC_ETH_DEV_TO_LIF(eth_dev) ((struct ionic_lif *) \
(eth_dev)->data->dev_private)
@@ -1688,12 +1688,12 @@ ionic_lif_configure_vlan_offload(struct ionic_lif *lif, int mask)
/*
* IONIC_ETH_HW_VLAN_RX_FILTER cannot be turned off, so
- * set DEV_RX_OFFLOAD_VLAN_FILTER and ignore ETH_VLAN_FILTER_MASK
+ * set RTE_ETH_RX_OFFLOAD_VLAN_FILTER and ignore RTE_ETH_VLAN_FILTER_MASK
*/
- rxmode->offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+ rxmode->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
- if (mask & ETH_VLAN_STRIP_MASK) {
- if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
lif->features |= IONIC_ETH_HW_VLAN_RX_STRIP;
else
lif->features &= ~IONIC_ETH_HW_VLAN_RX_STRIP;
@@ -1733,19 +1733,19 @@ ionic_lif_configure(struct ionic_lif *lif)
/*
* NB: While it is true that RSS_HASH is always enabled on ionic,
* setting this flag unconditionally causes problems in DTS.
- * rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+ * rxmode->offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
*/
/* RX per-port */
- if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM ||
- rxmode->offloads & DEV_RX_OFFLOAD_UDP_CKSUM ||
- rxmode->offloads & DEV_RX_OFFLOAD_TCP_CKSUM)
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM ||
+ rxmode->offloads & RTE_ETH_RX_OFFLOAD_UDP_CKSUM ||
+ rxmode->offloads & RTE_ETH_RX_OFFLOAD_TCP_CKSUM)
lif->features |= IONIC_ETH_HW_RX_CSUM;
else
lif->features &= ~IONIC_ETH_HW_RX_CSUM;
- if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER) {
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
lif->features |= IONIC_ETH_HW_RX_SG;
lif->eth_dev->data->scattered_rx = 1;
} else {
@@ -1754,30 +1754,30 @@ ionic_lif_configure(struct ionic_lif *lif)
}
/* Covers VLAN_STRIP */
- ionic_lif_configure_vlan_offload(lif, ETH_VLAN_STRIP_MASK);
+ ionic_lif_configure_vlan_offload(lif, RTE_ETH_VLAN_STRIP_MASK);
/* TX per-port */
- if (txmode->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM ||
- txmode->offloads & DEV_TX_OFFLOAD_UDP_CKSUM ||
- txmode->offloads & DEV_TX_OFFLOAD_TCP_CKSUM ||
- txmode->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
- txmode->offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
+ if (txmode->offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
+ txmode->offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM ||
+ txmode->offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM ||
+ txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
+ txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
lif->features |= IONIC_ETH_HW_TX_CSUM;
else
lif->features &= ~IONIC_ETH_HW_TX_CSUM;
- if (txmode->offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
+ if (txmode->offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
lif->features |= IONIC_ETH_HW_VLAN_TX_TAG;
else
lif->features &= ~IONIC_ETH_HW_VLAN_TX_TAG;
- if (txmode->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+ if (txmode->offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
lif->features |= IONIC_ETH_HW_TX_SG;
else
lif->features &= ~IONIC_ETH_HW_TX_SG;
- if (txmode->offloads & DEV_TX_OFFLOAD_TCP_TSO) {
+ if (txmode->offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO) {
lif->features |= IONIC_ETH_HW_TSO;
lif->features |= IONIC_ETH_HW_TSO_IPV6;
lif->features |= IONIC_ETH_HW_TSO_ECN;
@@ -203,11 +203,11 @@ ionic_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,
txq->flags |= IONIC_QCQ_F_DEFERRED;
/* Convert the offload flags into queue flags */
- if (offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
+ if (offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
txq->flags |= IONIC_QCQ_F_CSUM_L3;
- if (offloads & DEV_TX_OFFLOAD_TCP_CKSUM)
+ if (offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
txq->flags |= IONIC_QCQ_F_CSUM_TCP;
- if (offloads & DEV_TX_OFFLOAD_UDP_CKSUM)
+ if (offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM)
txq->flags |= IONIC_QCQ_F_CSUM_UDP;
eth_dev->data->tx_queues[tx_queue_id] = txq;
@@ -743,11 +743,11 @@ ionic_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
/*
* Note: the interface does not currently support
- * DEV_RX_OFFLOAD_KEEP_CRC, please also consider ETHER_CRC_LEN
+ * RTE_ETH_RX_OFFLOAD_KEEP_CRC, please also consider ETHER_CRC_LEN
* when the adapter will be able to keep the CRC and subtract
* it from the length for all received packets:
* if (eth_dev->data->dev_conf.rxmode.offloads &
- * DEV_RX_OFFLOAD_KEEP_CRC)
+ * RTE_ETH_RX_OFFLOAD_KEEP_CRC)
* rxq->crc_len = ETHER_CRC_LEN;
*/
@@ -50,11 +50,11 @@ ipn3ke_rpst_dev_infos_get(struct rte_eth_dev *ethdev,
dev_info->speed_capa =
(hw->retimer.mac_type ==
IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) ?
- ETH_LINK_SPEED_10G :
+ RTE_ETH_LINK_SPEED_10G :
((hw->retimer.mac_type ==
IFPGA_RAWDEV_RETIMER_MAC_TYPE_25GE_25GAUI) ?
- ETH_LINK_SPEED_25G :
- ETH_LINK_SPEED_AUTONEG);
+ RTE_ETH_LINK_SPEED_25G :
+ RTE_ETH_LINK_SPEED_AUTONEG);
dev_info->max_rx_queues = 1;
dev_info->max_tx_queues = 1;
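A note on the speed bits used above: RTE_ETH_LINK_SPEED_AUTONEG is defined as 0, so it cannot be tested with a bitwise AND. A sketch of decoding speed_capa on the application side follows; the helper name and output are illustrative.

#include <stdio.h>
#include <rte_ethdev.h>

/* Sketch: report the capability bits a port like ipn3ke advertises. */
static void
print_speed_capa(const struct rte_eth_dev_info *dev_info)
{
	if (dev_info->speed_capa == RTE_ETH_LINK_SPEED_AUTONEG) {
		printf("autoneg only\n"); /* AUTONEG is the value 0 */
		return;
	}
	if (dev_info->speed_capa & RTE_ETH_LINK_SPEED_10G)
		printf("10G ");
	if (dev_info->speed_capa & RTE_ETH_LINK_SPEED_25G)
		printf("25G ");
	printf("\n");
}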
@@ -67,30 +67,30 @@ ipn3ke_rpst_dev_infos_get(struct rte_eth_dev *ethdev,
};
dev_info->rx_queue_offload_capa = 0;
dev_info->rx_offload_capa =
- DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_QINQ_STRIP |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_VLAN_EXTEND |
- DEV_RX_OFFLOAD_VLAN_FILTER;
-
- dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+ RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+ RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
+
+ dev_info->tx_queue_offload_capa = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
dev_info->tx_offload_capa =
- DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_QINQ_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_SCTP_CKSUM |
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO |
- DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
- DEV_TX_OFFLOAD_GRE_TNL_TSO |
- DEV_TX_OFFLOAD_IPIP_TNL_TSO |
- DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
- DEV_TX_OFFLOAD_MULTI_SEGS |
+ RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+ RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_TSO |
+ RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
dev_info->tx_queue_offload_capa;
dev_info->dev_capa =
@@ -2399,10 +2399,10 @@ ipn3ke_update_link(struct rte_rawdev *rawdev,
(uint64_t *)&link_speed);
switch (link_speed) {
case IFPGA_RAWDEV_LINK_SPEED_10GB:
- link->link_speed = ETH_SPEED_NUM_10G;
+ link->link_speed = RTE_ETH_SPEED_NUM_10G;
break;
case IFPGA_RAWDEV_LINK_SPEED_25GB:
- link->link_speed = ETH_SPEED_NUM_25G;
+ link->link_speed = RTE_ETH_SPEED_NUM_25G;
break;
default:
IPN3KE_AFU_PMD_ERR("Unknown link speed info %u", link_speed);
@@ -2460,9 +2460,9 @@ ipn3ke_rpst_link_update(struct rte_eth_dev *ethdev,
memset(&link, 0, sizeof(link));
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
link.link_autoneg = !(ethdev->data->dev_conf.link_speeds &
- ETH_LINK_SPEED_FIXED);
+ RTE_ETH_LINK_SPEED_FIXED);
rawdev = hw->rawdev;
ipn3ke_update_link(rawdev, rpst->port_id, &link);
@@ -2518,9 +2518,9 @@ ipn3ke_rpst_link_check(struct ipn3ke_rpst *rpst)
memset(&link, 0, sizeof(link));
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
link.link_autoneg = !(rpst->ethdev->data->dev_conf.link_speeds &
- ETH_LINK_SPEED_FIXED);
+ RTE_ETH_LINK_SPEED_FIXED);
rawdev = hw->rawdev;
ipn3ke_update_link(rawdev, rpst->port_id, &link);
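Applications consume the link state filled in by drivers like this through rte_eth_link_get_nowait(); a sketch using the renamed constants (the port id and helper name are assumptions):

#include <stdio.h>
#include <rte_ethdev.h>

/* Sketch: print the current link state of one port. */
static void
show_link(uint16_t port_id)
{
	struct rte_eth_link link;

	if (rte_eth_link_get_nowait(port_id, &link) != 0)
		return;
	if (link.link_status == RTE_ETH_LINK_UP)
		printf("port %u up, %u Mbps, %s\n", port_id,
		       link.link_speed,
		       link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
		       "full-duplex" : "half-duplex");
	else
		printf("port %u down\n", port_id);
}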
@@ -1857,7 +1857,7 @@ ixgbe_vlan_tpid_set(struct rte_eth_dev *dev,
qinq &= IXGBE_DMATXCTL_GDV;
switch (vlan_type) {
- case ETH_VLAN_TYPE_INNER:
+ case RTE_ETH_VLAN_TYPE_INNER:
if (qinq) {
reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid;
@@ -1872,7 +1872,7 @@ ixgbe_vlan_tpid_set(struct rte_eth_dev *dev,
" by single VLAN");
}
break;
- case ETH_VLAN_TYPE_OUTER:
+ case RTE_ETH_VLAN_TYPE_OUTER:
if (qinq) {
/* Only the high 16 bits are valid */
IXGBE_WRITE_REG(hw, IXGBE_EXVET, (uint32_t)tpid <<
@@ -1959,10 +1959,10 @@ ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
if (on) {
rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
- rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+ rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
} else {
rxq->vlan_flags = PKT_RX_VLAN;
- rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+ rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
}
}
@@ -2083,7 +2083,7 @@ ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
if (hw->mac.type == ixgbe_mac_82598EB) {
- if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
ctrl |= IXGBE_VLNCTRL_VME;
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
@@ -2100,7 +2100,7 @@ ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
- if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+ if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
ctrl |= IXGBE_RXDCTL_VME;
on = TRUE;
} else {
@@ -2122,17 +2122,17 @@ ixgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
struct rte_eth_rxmode *rxmode;
struct ixgbe_rx_queue *rxq;
- if (mask & ETH_VLAN_STRIP_MASK) {
+ if (mask & RTE_ETH_VLAN_STRIP_MASK) {
rxmode = &dev->data->dev_conf.rxmode;
- if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
- rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+ rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
}
else
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
- rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+ rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
}
}
}
@@ -2143,19 +2143,18 @@ ixgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
struct rte_eth_rxmode *rxmode;
rxmode = &dev->data->dev_conf.rxmode;
- if (mask & ETH_VLAN_STRIP_MASK) {
+ if (mask & RTE_ETH_VLAN_STRIP_MASK)
ixgbe_vlan_hw_strip_config(dev);
- }
- if (mask & ETH_VLAN_FILTER_MASK) {
- if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+ if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
ixgbe_vlan_hw_filter_enable(dev);
else
ixgbe_vlan_hw_filter_disable(dev);
}
- if (mask & ETH_VLAN_EXTEND_MASK) {
- if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+ if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
ixgbe_vlan_hw_extend_enable(dev);
else
ixgbe_vlan_hw_extend_disable(dev);
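The mask bits handled above correspond to what an application passes to rte_eth_dev_set_vlan_offload() at runtime; for example (a sketch, helper name assumed):

#include <rte_ethdev.h>

/* Sketch: toggle VLAN stripping on a running port. */
static int
set_vlan_strip(uint16_t port_id, int on)
{
	int mask = rte_eth_dev_get_vlan_offload(port_id);

	if (mask < 0)
		return mask;
	if (on)
		mask |= RTE_ETH_VLAN_STRIP_OFFLOAD;
	else
		mask &= ~RTE_ETH_VLAN_STRIP_OFFLOAD;
	return rte_eth_dev_set_vlan_offload(port_id, mask);
}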
@@ -2194,10 +2193,10 @@ ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
switch (nb_rx_q) {
case 1:
case 2:
- RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
+ RTE_ETH_DEV_SRIOV(dev).active = RTE_ETH_64_POOLS;
break;
case 4:
- RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
+ RTE_ETH_DEV_SRIOV(dev).active = RTE_ETH_32_POOLS;
break;
default:
return -EINVAL;
@@ -2221,18 +2220,18 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
/* check multi-queue mode */
switch (dev_conf->rxmode.mq_mode) {
- case ETH_MQ_RX_VMDQ_DCB:
- PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
+ case RTE_ETH_MQ_RX_VMDQ_DCB:
+ PMD_INIT_LOG(INFO, "RTE_ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
break;
- case ETH_MQ_RX_VMDQ_DCB_RSS:
+ case RTE_ETH_MQ_RX_VMDQ_DCB_RSS:
/* DCB/RSS VMDQ in SRIOV mode, not implemented yet */
PMD_INIT_LOG(ERR, "SRIOV active,"
" unsupported mq_mode rx %d.",
dev_conf->rxmode.mq_mode);
return -EINVAL;
- case ETH_MQ_RX_RSS:
- case ETH_MQ_RX_VMDQ_RSS:
- dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
+ case RTE_ETH_MQ_RX_RSS:
+ case RTE_ETH_MQ_RX_VMDQ_RSS:
+ dev->data->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_RSS;
if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
if (ixgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
PMD_INIT_LOG(ERR, "SRIOV is active,"
@@ -2242,12 +2241,12 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
return -EINVAL;
}
break;
- case ETH_MQ_RX_VMDQ_ONLY:
- case ETH_MQ_RX_NONE:
+ case RTE_ETH_MQ_RX_VMDQ_ONLY:
+ case RTE_ETH_MQ_RX_NONE:
/* if no mq mode is configured, use the default scheme */
- dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
+ dev->data->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_ONLY;
break;
- default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/
+ default: /* RTE_ETH_MQ_RX_DCB, RTE_ETH_MQ_RX_DCB_RSS or RTE_ETH_MQ_TX_DCB */
/* SRIOV only works in VMDq enable mode */
PMD_INIT_LOG(ERR, "SRIOV is active,"
" wrong mq_mode rx %d.",
@@ -2256,12 +2255,12 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
}
switch (dev_conf->txmode.mq_mode) {
- case ETH_MQ_TX_VMDQ_DCB:
- PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
- dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
+ case RTE_ETH_MQ_TX_VMDQ_DCB:
+ PMD_INIT_LOG(INFO, "RTE_ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
+ dev->data->dev_conf.txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB;
break;
- default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
- dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
+ default: /* RTE_ETH_MQ_TX_VMDQ_ONLY or RTE_ETH_MQ_TX_NONE */
+ dev->data->dev_conf.txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_ONLY;
break;
}
@@ -2276,13 +2275,13 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
return -EINVAL;
}
} else {
- if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
+ if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB_RSS) {
PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
" not supported.");
return -EINVAL;
}
/* check configuration for vmdq+dcb mode */
- if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
+ if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB) {
const struct rte_eth_vmdq_dcb_conf *conf;
if (nb_rx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
@@ -2291,15 +2290,15 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
return -EINVAL;
}
conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
- if (!(conf->nb_queue_pools == ETH_16_POOLS ||
- conf->nb_queue_pools == ETH_32_POOLS)) {
+ if (!(conf->nb_queue_pools == RTE_ETH_16_POOLS ||
+ conf->nb_queue_pools == RTE_ETH_32_POOLS)) {
PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
" nb_queue_pools must be %d or %d.",
- ETH_16_POOLS, ETH_32_POOLS);
+ RTE_ETH_16_POOLS, RTE_ETH_32_POOLS);
return -EINVAL;
}
}
- if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+ if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB) {
const struct rte_eth_vmdq_dcb_tx_conf *conf;
if (nb_tx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
@@ -2308,39 +2307,39 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
return -EINVAL;
}
conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
- if (!(conf->nb_queue_pools == ETH_16_POOLS ||
- conf->nb_queue_pools == ETH_32_POOLS)) {
+ if (!(conf->nb_queue_pools == RTE_ETH_16_POOLS ||
+ conf->nb_queue_pools == RTE_ETH_32_POOLS)) {
PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
" nb_queue_pools != %d and"
" nb_queue_pools != %d.",
- ETH_16_POOLS, ETH_32_POOLS);
+ RTE_ETH_16_POOLS, RTE_ETH_32_POOLS);
return -EINVAL;
}
}
/* For DCB mode check our configuration before we go further */
- if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
+ if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_DCB) {
const struct rte_eth_dcb_rx_conf *conf;
conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
- if (!(conf->nb_tcs == ETH_4_TCS ||
- conf->nb_tcs == ETH_8_TCS)) {
+ if (!(conf->nb_tcs == RTE_ETH_4_TCS ||
+ conf->nb_tcs == RTE_ETH_8_TCS)) {
PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
" and nb_tcs != %d.",
- ETH_4_TCS, ETH_8_TCS);
+ RTE_ETH_4_TCS, RTE_ETH_8_TCS);
return -EINVAL;
}
}
- if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+ if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_DCB) {
const struct rte_eth_dcb_tx_conf *conf;
conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
- if (!(conf->nb_tcs == ETH_4_TCS ||
- conf->nb_tcs == ETH_8_TCS)) {
+ if (!(conf->nb_tcs == RTE_ETH_4_TCS ||
+ conf->nb_tcs == RTE_ETH_8_TCS)) {
PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
" and nb_tcs != %d.",
- ETH_4_TCS, ETH_8_TCS);
+ RTE_ETH_4_TCS, RTE_ETH_8_TCS);
return -EINVAL;
}
}
@@ -2349,7 +2348,7 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
* When DCB/VT is off, maximum number of queues changes,
* except for 82598EB, which remains constant.
*/
- if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE &&
+ if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_NONE &&
hw->mac.type != ixgbe_mac_82598EB) {
if (nb_tx_q > IXGBE_NONE_MODE_TX_NB_QUEUES) {
PMD_INIT_LOG(ERR,
@@ -2373,8 +2372,8 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
- if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
- dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+ if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+ dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
/* multiple queue mode checking */
ret = ixgbe_check_mq_mode(dev);
@@ -2619,15 +2618,15 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
goto error;
}
- mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
- ETH_VLAN_EXTEND_MASK;
+ mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+ RTE_ETH_VLAN_EXTEND_MASK;
err = ixgbe_vlan_offload_config(dev, mask);
if (err) {
PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
goto error;
}
- if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
+ if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_ONLY) {
/* Enable vlan filtering for VMDq */
ixgbe_vmdq_vlan_hw_filter_enable(dev);
}
@@ -2704,17 +2703,17 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_X550EM_a:
- allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
- ETH_LINK_SPEED_2_5G | ETH_LINK_SPEED_5G |
- ETH_LINK_SPEED_10G;
+ allowed_speeds = RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_1G |
+ RTE_ETH_LINK_SPEED_2_5G | RTE_ETH_LINK_SPEED_5G |
+ RTE_ETH_LINK_SPEED_10G;
if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)
- allowed_speeds = ETH_LINK_SPEED_10M |
- ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G;
+ allowed_speeds = RTE_ETH_LINK_SPEED_10M |
+ RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_1G;
break;
default:
- allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
- ETH_LINK_SPEED_10G;
+ allowed_speeds = RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_1G |
+ RTE_ETH_LINK_SPEED_10G;
}
link_speeds = &dev->data->dev_conf.link_speeds;
@@ -2728,7 +2727,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
}
speed = 0x0;
- if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
+ if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
speed = IXGBE_LINK_SPEED_82598_AUTONEG;
@@ -2746,17 +2745,17 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
speed = IXGBE_LINK_SPEED_82599_AUTONEG;
}
} else {
- if (*link_speeds & ETH_LINK_SPEED_10G)
+ if (*link_speeds & RTE_ETH_LINK_SPEED_10G)
speed |= IXGBE_LINK_SPEED_10GB_FULL;
- if (*link_speeds & ETH_LINK_SPEED_5G)
+ if (*link_speeds & RTE_ETH_LINK_SPEED_5G)
speed |= IXGBE_LINK_SPEED_5GB_FULL;
- if (*link_speeds & ETH_LINK_SPEED_2_5G)
+ if (*link_speeds & RTE_ETH_LINK_SPEED_2_5G)
speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
- if (*link_speeds & ETH_LINK_SPEED_1G)
+ if (*link_speeds & RTE_ETH_LINK_SPEED_1G)
speed |= IXGBE_LINK_SPEED_1GB_FULL;
- if (*link_speeds & ETH_LINK_SPEED_100M)
+ if (*link_speeds & RTE_ETH_LINK_SPEED_100M)
speed |= IXGBE_LINK_SPEED_100_FULL;
- if (*link_speeds & ETH_LINK_SPEED_10M)
+ if (*link_speeds & RTE_ETH_LINK_SPEED_10M)
speed |= IXGBE_LINK_SPEED_10_FULL;
}
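From the application side, the link_speeds word consumed here is built from the same renamed macros; pinning the link instead of autonegotiating is a two-line fragment (the chosen rate is arbitrary, and conf is assumed to be the rte_eth_conf passed to rte_eth_dev_configure()):

/* Sketch: request a fixed 1 Gb/s link instead of autonegotiation. */
struct rte_eth_conf conf = { 0 };

conf.link_speeds = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_FIXED;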
@@ -3832,7 +3831,7 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
* When DCB/VT is off, maximum number of queues changes,
* except for 82598EB, which remains constant.
*/
- if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE &&
+ if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_NONE &&
hw->mac.type != ixgbe_mac_82598EB)
dev_info->max_tx_queues = IXGBE_NONE_MODE_TX_NB_QUEUES;
}
@@ -3842,9 +3841,9 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
dev_info->max_vfs = pci_dev->max_vfs;
if (hw->mac.type == ixgbe_mac_82598EB)
- dev_info->max_vmdq_pools = ETH_16_POOLS;
+ dev_info->max_vmdq_pools = RTE_ETH_16_POOLS;
else
- dev_info->max_vmdq_pools = ETH_64_POOLS;
+ dev_info->max_vmdq_pools = RTE_ETH_64_POOLS;
dev_info->max_mtu = dev_info->max_rx_pktlen - IXGBE_ETH_OVERHEAD;
dev_info->min_mtu = RTE_ETHER_MIN_MTU;
dev_info->vmdq_queue_num = dev_info->max_rx_queues;
@@ -3883,21 +3882,21 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type);
dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL;
- dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
+ dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G;
if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)
- dev_info->speed_capa = ETH_LINK_SPEED_10M |
- ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G;
+ dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M |
+ RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_1G;
if (hw->mac.type == ixgbe_mac_X540 ||
hw->mac.type == ixgbe_mac_X540_vf ||
hw->mac.type == ixgbe_mac_X550 ||
hw->mac.type == ixgbe_mac_X550_vf) {
- dev_info->speed_capa |= ETH_LINK_SPEED_100M;
+ dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100M;
}
if (hw->mac.type == ixgbe_mac_X550) {
- dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
- dev_info->speed_capa |= ETH_LINK_SPEED_5G;
+ dev_info->speed_capa |= RTE_ETH_LINK_SPEED_2_5G;
+ dev_info->speed_capa |= RTE_ETH_LINK_SPEED_5G;
}
/* Driver-preferred Rx/Tx parameters */
@@ -3966,9 +3965,9 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
dev_info->max_vfs = pci_dev->max_vfs;
if (hw->mac.type == ixgbe_mac_82598EB)
- dev_info->max_vmdq_pools = ETH_16_POOLS;
+ dev_info->max_vmdq_pools = RTE_ETH_16_POOLS;
else
- dev_info->max_vmdq_pools = ETH_64_POOLS;
+ dev_info->max_vmdq_pools = RTE_ETH_64_POOLS;
dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
dev_info->rx_queue_offload_capa);
@@ -4211,11 +4210,11 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
u32 esdp_reg;
memset(&link, 0, sizeof(link));
- link.link_status = ETH_LINK_DOWN;
- link.link_speed = ETH_SPEED_NUM_NONE;
- link.link_duplex = ETH_LINK_HALF_DUPLEX;
+ link.link_status = RTE_ETH_LINK_DOWN;
+ link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+ link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
link.link_autoneg = !(dev->data->dev_conf.link_speeds &
- ETH_LINK_SPEED_FIXED);
+ RTE_ETH_LINK_SPEED_FIXED);
hw->mac.get_link_status = true;
@@ -4237,8 +4236,8 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
diag = ixgbe_check_link(hw, &link_speed, &link_up, wait);
if (diag != 0) {
- link.link_speed = ETH_SPEED_NUM_100M;
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_speed = RTE_ETH_SPEED_NUM_100M;
+ link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
return rte_eth_linkstatus_set(dev, &link);
}
@@ -4274,37 +4273,37 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
return rte_eth_linkstatus_set(dev, &link);
}
- link.link_status = ETH_LINK_UP;
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_status = RTE_ETH_LINK_UP;
+ link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
switch (link_speed) {
default:
case IXGBE_LINK_SPEED_UNKNOWN:
- link.link_speed = ETH_SPEED_NUM_UNKNOWN;
+ link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
break;
case IXGBE_LINK_SPEED_10_FULL:
- link.link_speed = ETH_SPEED_NUM_10M;
+ link.link_speed = RTE_ETH_SPEED_NUM_10M;
break;
case IXGBE_LINK_SPEED_100_FULL:
- link.link_speed = ETH_SPEED_NUM_100M;
+ link.link_speed = RTE_ETH_SPEED_NUM_100M;
break;
case IXGBE_LINK_SPEED_1GB_FULL:
- link.link_speed = ETH_SPEED_NUM_1G;
+ link.link_speed = RTE_ETH_SPEED_NUM_1G;
break;
case IXGBE_LINK_SPEED_2_5GB_FULL:
- link.link_speed = ETH_SPEED_NUM_2_5G;
+ link.link_speed = RTE_ETH_SPEED_NUM_2_5G;
break;
case IXGBE_LINK_SPEED_5GB_FULL:
- link.link_speed = ETH_SPEED_NUM_5G;
+ link.link_speed = RTE_ETH_SPEED_NUM_5G;
break;
case IXGBE_LINK_SPEED_10GB_FULL:
- link.link_speed = ETH_SPEED_NUM_10G;
+ link.link_speed = RTE_ETH_SPEED_NUM_10G;
break;
}
@@ -4521,7 +4520,7 @@ ixgbe_dev_link_status_print(struct rte_eth_dev *dev)
PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
(int)(dev->data->port_id),
(unsigned)link.link_speed,
- link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+ link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
"full-duplex" : "half-duplex");
} else {
PMD_INIT_LOG(INFO, " Port %d: Link Down",
@@ -4740,13 +4739,13 @@ ixgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
tx_pause = 0;
if (rx_pause && tx_pause)
- fc_conf->mode = RTE_FC_FULL;
+ fc_conf->mode = RTE_ETH_FC_FULL;
else if (rx_pause)
- fc_conf->mode = RTE_FC_RX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
else if (tx_pause)
- fc_conf->mode = RTE_FC_TX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
else
- fc_conf->mode = RTE_FC_NONE;
+ fc_conf->mode = RTE_ETH_FC_NONE;
return 0;
}
@@ -5044,8 +5043,8 @@ ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
}
for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
mask = (uint8_t)((reta_conf[idx].mask >> shift) &
IXGBE_4_BIT_MASK);
if (!mask)
@@ -5092,8 +5091,8 @@ ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
}
for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
mask = (uint8_t)((reta_conf[idx].mask >> shift) &
IXGBE_4_BIT_MASK);
if (!mask)
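The idx/shift arithmetic above mirrors what callers of the RETA API do; a sketch that spreads the indirection table across two queues (the queue ids and helper name are assumptions):

#include <stdint.h>
#include <string.h>
#include <rte_ethdev.h>

/* Sketch: alternate all RETA entries between queues 0 and 1. */
static int
spread_rss(uint16_t port_id, uint16_t reta_size)
{
	struct rte_eth_rss_reta_entry64
		reta_conf[RTE_ETH_RSS_RETA_SIZE_512 / RTE_ETH_RETA_GROUP_SIZE];
	uint16_t i;

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < reta_size; i++) {
		uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
		uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE;

		reta_conf[idx].mask |= UINT64_C(1) << shift;
		reta_conf[idx].reta[shift] = i % 2;
	}
	return rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
}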
@@ -5255,22 +5254,22 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
dev->data->port_id);
- if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
- dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+ if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+ dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
/*
* VF has no ability to enable/disable HW CRC
* Keep the persistent behavior the same as Host PF
*/
#ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
- if (conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+ if (conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
- conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_KEEP_CRC;
+ conf->rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_KEEP_CRC;
}
#else
- if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)) {
+ if (!(conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)) {
PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
- conf->rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
+ conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
}
#endif
@@ -5330,8 +5329,8 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
ixgbevf_set_vfta_all(dev, 1);
/* Set HW strip */
- mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
- ETH_VLAN_EXTEND_MASK;
+ mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+ RTE_ETH_VLAN_EXTEND_MASK;
err = ixgbevf_vlan_offload_config(dev, mask);
if (err) {
PMD_INIT_LOG(ERR, "Unable to set VLAN offload (%d)", err);
@@ -5568,10 +5567,10 @@ ixgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask)
int on = 0;
/* The VF only supports the hw strip feature; others are not supported */
- if (mask & ETH_VLAN_STRIP_MASK) {
+ if (mask & RTE_ETH_VLAN_STRIP_MASK) {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
- on = !!(rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+ on = !!(rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
ixgbevf_vlan_strip_queue_set(dev, i, on);
}
}
@@ -5702,12 +5701,12 @@ ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
return -ENOTSUP;
if (on) {
- for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
+ for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
uta_info->uta_shadow[i] = ~0;
IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0);
}
} else {
- for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
+ for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
uta_info->uta_shadow[i] = 0;
IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
}
@@ -5721,15 +5720,15 @@ ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
{
uint32_t new_val = orig_val;
- if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
+ if (rx_mask & RTE_ETH_VMDQ_ACCEPT_UNTAG)
new_val |= IXGBE_VMOLR_AUPE;
- if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
+ if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_MC)
new_val |= IXGBE_VMOLR_ROMPE;
- if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
+ if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_UC)
new_val |= IXGBE_VMOLR_ROPE;
- if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
+ if (rx_mask & RTE_ETH_VMDQ_ACCEPT_BROADCAST)
new_val |= IXGBE_VMOLR_BAM;
- if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
+ if (rx_mask & RTE_ETH_VMDQ_ACCEPT_MULTICAST)
new_val |= IXGBE_VMOLR_MPE;
return new_val;
@@ -6724,15 +6723,15 @@ ixgbe_start_timecounters(struct rte_eth_dev *dev)
rte_eth_linkstatus_get(dev, &link);
switch (link.link_speed) {
- case ETH_SPEED_NUM_100M:
+ case RTE_ETH_SPEED_NUM_100M:
incval = IXGBE_INCVAL_100;
shift = IXGBE_INCVAL_SHIFT_100;
break;
- case ETH_SPEED_NUM_1G:
+ case RTE_ETH_SPEED_NUM_1G:
incval = IXGBE_INCVAL_1GB;
shift = IXGBE_INCVAL_SHIFT_1GB;
break;
- case ETH_SPEED_NUM_10G:
+ case RTE_ETH_SPEED_NUM_10G:
default:
incval = IXGBE_INCVAL_10GB;
shift = IXGBE_INCVAL_SHIFT_10GB;
@@ -7143,16 +7142,16 @@ ixgbe_reta_size_get(enum ixgbe_mac_type mac_type) {
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_X550EM_a:
- return ETH_RSS_RETA_SIZE_512;
+ return RTE_ETH_RSS_RETA_SIZE_512;
case ixgbe_mac_X550_vf:
case ixgbe_mac_X550EM_x_vf:
case ixgbe_mac_X550EM_a_vf:
- return ETH_RSS_RETA_SIZE_64;
+ return RTE_ETH_RSS_RETA_SIZE_64;
case ixgbe_mac_X540_vf:
case ixgbe_mac_82599_vf:
return 0;
default:
- return ETH_RSS_RETA_SIZE_128;
+ return RTE_ETH_RSS_RETA_SIZE_128;
}
}
@@ -7162,10 +7161,10 @@ ixgbe_reta_reg_get(enum ixgbe_mac_type mac_type, uint16_t reta_idx) {
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_X550EM_a:
- if (reta_idx < ETH_RSS_RETA_SIZE_128)
+ if (reta_idx < RTE_ETH_RSS_RETA_SIZE_128)
return IXGBE_RETA(reta_idx >> 2);
else
- return IXGBE_ERETA((reta_idx - ETH_RSS_RETA_SIZE_128) >> 2);
+ return IXGBE_ERETA((reta_idx - RTE_ETH_RSS_RETA_SIZE_128) >> 2);
case ixgbe_mac_X550_vf:
case ixgbe_mac_X550EM_x_vf:
case ixgbe_mac_X550EM_a_vf:
@@ -7221,7 +7220,7 @@ ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
uint8_t nb_tcs;
uint8_t i, j;
- if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
+ if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_DCB_FLAG)
dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
else
dcb_info->nb_tcs = 1;
@@ -7232,7 +7231,7 @@ ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
if (dcb_config->vt_mode) { /* vt is enabled */
struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
&dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
- for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+ for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
if (RTE_ETH_DEV_SRIOV(dev).active > 0) {
for (j = 0; j < nb_tcs; j++) {
@@ -7256,9 +7255,9 @@ ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
} else { /* vt is disabled */
struct rte_eth_dcb_rx_conf *rx_conf =
&dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
- for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+ for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
- if (dcb_info->nb_tcs == ETH_4_TCS) {
+ if (dcb_info->nb_tcs == RTE_ETH_4_TCS) {
for (i = 0; i < dcb_info->nb_tcs; i++) {
dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
@@ -7271,7 +7270,7 @@ ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
- } else if (dcb_info->nb_tcs == ETH_8_TCS) {
+ } else if (dcb_info->nb_tcs == RTE_ETH_8_TCS) {
for (i = 0; i < dcb_info->nb_tcs; i++) {
dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
@@ -7524,7 +7523,7 @@ ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
}
switch (l2_tunnel->l2_tunnel_type) {
- case RTE_L2_TUNNEL_TYPE_E_TAG:
+ case RTE_ETH_L2_TUNNEL_TYPE_E_TAG:
ret = ixgbe_e_tag_filter_add(dev, l2_tunnel);
break;
default:
@@ -7556,7 +7555,7 @@ ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
return ret;
switch (l2_tunnel->l2_tunnel_type) {
- case RTE_L2_TUNNEL_TYPE_E_TAG:
+ case RTE_ETH_L2_TUNNEL_TYPE_E_TAG:
ret = ixgbe_e_tag_filter_del(dev, l2_tunnel);
break;
default:
@@ -7653,12 +7652,12 @@ ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
return -EINVAL;
switch (udp_tunnel->prot_type) {
- case RTE_TUNNEL_TYPE_VXLAN:
+ case RTE_ETH_TUNNEL_TYPE_VXLAN:
ret = ixgbe_add_vxlan_port(hw, udp_tunnel->udp_port);
break;
- case RTE_TUNNEL_TYPE_GENEVE:
- case RTE_TUNNEL_TYPE_TEREDO:
+ case RTE_ETH_TUNNEL_TYPE_GENEVE:
+ case RTE_ETH_TUNNEL_TYPE_TEREDO:
PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
ret = -EINVAL;
break;
@@ -7690,11 +7689,11 @@ ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
return -EINVAL;
switch (udp_tunnel->prot_type) {
- case RTE_TUNNEL_TYPE_VXLAN:
+ case RTE_ETH_TUNNEL_TYPE_VXLAN:
ret = ixgbe_del_vxlan_port(hw, udp_tunnel->udp_port);
break;
- case RTE_TUNNEL_TYPE_GENEVE:
- case RTE_TUNNEL_TYPE_TEREDO:
+ case RTE_ETH_TUNNEL_TYPE_GENEVE:
+ case RTE_ETH_TUNNEL_TYPE_TEREDO:
PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
ret = -EINVAL;
break;
@@ -114,15 +114,15 @@
#define IXGBE_FDIR_NVGRE_TUNNEL_TYPE 0x0
#define IXGBE_RSS_OFFLOAD_ALL ( \
- ETH_RSS_IPV4 | \
- ETH_RSS_NONFRAG_IPV4_TCP | \
- ETH_RSS_NONFRAG_IPV4_UDP | \
- ETH_RSS_IPV6 | \
- ETH_RSS_NONFRAG_IPV6_TCP | \
- ETH_RSS_NONFRAG_IPV6_UDP | \
- ETH_RSS_IPV6_EX | \
- ETH_RSS_IPV6_TCP_EX | \
- ETH_RSS_IPV6_UDP_EX)
+ RTE_ETH_RSS_IPV4 | \
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+ RTE_ETH_RSS_IPV6 | \
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+ RTE_ETH_RSS_IPV6_EX | \
+ RTE_ETH_RSS_IPV6_TCP_EX | \
+ RTE_ETH_RSS_IPV6_UDP_EX)
#define IXGBE_VF_IRQ_ENABLE_MASK 3 /* vf irq enable mask */
#define IXGBE_VF_MAXMSIVECTOR 1
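IXGBE_RSS_OFFLOAD_ALL is what the driver reports as dev_info->flow_type_rss_offloads, so an application can validate a requested hash profile against it before configuring RSS. A fragment (the wanted set is an arbitrary example, and port_id is assumed to be in scope):

/* Sketch: bail out if the PMD cannot hash on the requested fields. */
struct rte_eth_dev_info dev_info;
uint64_t wanted = RTE_ETH_RSS_NONFRAG_IPV4_TCP |
		  RTE_ETH_RSS_NONFRAG_IPV4_UDP;

if (rte_eth_dev_info_get(port_id, &dev_info) == 0 &&
    (dev_info.flow_type_rss_offloads & wanted) != wanted)
	rte_exit(EXIT_FAILURE, "port cannot hash on the requested fields\n");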
@@ -90,9 +90,9 @@ static int fdir_enable_82599(struct ixgbe_hw *hw, uint32_t fdirctrl);
static uint32_t ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
uint32_t key);
static uint32_t atr_compute_sig_hash_82599(union ixgbe_atr_input *input,
- enum rte_fdir_pballoc_type pballoc);
+ enum rte_eth_fdir_pballoc_type pballoc);
static uint32_t atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
- enum rte_fdir_pballoc_type pballoc);
+ enum rte_eth_fdir_pballoc_type pballoc);
static int fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
union ixgbe_atr_input *input, uint8_t queue,
uint32_t fdircmd, uint32_t fdirhash,
@@ -163,20 +163,20 @@ fdir_enable_82599(struct ixgbe_hw *hw, uint32_t fdirctrl)
* flexbytes matching field, and drop queue (only for perfect matching mode).
*/
static inline int
-configure_fdir_flags(const struct rte_fdir_conf *conf, uint32_t *fdirctrl)
+configure_fdir_flags(const struct rte_eth_fdir_conf *conf, uint32_t *fdirctrl)
{
*fdirctrl = 0;
switch (conf->pballoc) {
- case RTE_FDIR_PBALLOC_64K:
+ case RTE_ETH_FDIR_PBALLOC_64K:
/* 8k - 1 signature filters */
*fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
break;
- case RTE_FDIR_PBALLOC_128K:
+ case RTE_ETH_FDIR_PBALLOC_128K:
/* 16k - 1 signature filters */
*fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
break;
- case RTE_FDIR_PBALLOC_256K:
+ case RTE_ETH_FDIR_PBALLOC_256K:
/* 32k - 1 signature filters */
*fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
break;
@@ -807,13 +807,13 @@ ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
static uint32_t
atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
- enum rte_fdir_pballoc_type pballoc)
+ enum rte_eth_fdir_pballoc_type pballoc)
{
- if (pballoc == RTE_FDIR_PBALLOC_256K)
+ if (pballoc == RTE_ETH_FDIR_PBALLOC_256K)
return ixgbe_atr_compute_hash_82599(input,
IXGBE_ATR_BUCKET_HASH_KEY) &
PERFECT_BUCKET_256KB_HASH_MASK;
- else if (pballoc == RTE_FDIR_PBALLOC_128K)
+ else if (pballoc == RTE_ETH_FDIR_PBALLOC_128K)
return ixgbe_atr_compute_hash_82599(input,
IXGBE_ATR_BUCKET_HASH_KEY) &
PERFECT_BUCKET_128KB_HASH_MASK;
@@ -850,15 +850,15 @@ ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, uint32_t *fdircmd)
*/
static uint32_t
atr_compute_sig_hash_82599(union ixgbe_atr_input *input,
- enum rte_fdir_pballoc_type pballoc)
+ enum rte_eth_fdir_pballoc_type pballoc)
{
uint32_t bucket_hash, sig_hash;
- if (pballoc == RTE_FDIR_PBALLOC_256K)
+ if (pballoc == RTE_ETH_FDIR_PBALLOC_256K)
bucket_hash = ixgbe_atr_compute_hash_82599(input,
IXGBE_ATR_BUCKET_HASH_KEY) &
SIG_BUCKET_256KB_HASH_MASK;
- else if (pballoc == RTE_FDIR_PBALLOC_128K)
+ else if (pballoc == RTE_ETH_FDIR_PBALLOC_128K)
bucket_hash = ixgbe_atr_compute_hash_82599(input,
IXGBE_ATR_BUCKET_HASH_KEY) &
SIG_BUCKET_128KB_HASH_MASK;
@@ -1259,7 +1259,7 @@ cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
return -rte_errno;
}
- filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
+ filter->l2_tunnel_type = RTE_ETH_L2_TUNNEL_TYPE_E_TAG;
/**
* grp and e_cid_base are bit fields and only use 14 bits.
* e-tag id is taken as little endian by HW.
@@ -392,7 +392,7 @@ ixgbe_crypto_create_session(void *device,
aead_xform = &conf->crypto_xform->aead;
if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
- if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) {
+ if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
ic_session->op = IXGBE_OP_AUTHENTICATED_DECRYPTION;
} else {
PMD_DRV_LOG(ERR, "IPsec decryption not enabled\n");
@@ -400,7 +400,7 @@ ixgbe_crypto_create_session(void *device,
return -ENOTSUP;
}
} else {
- if (dev_conf->txmode.offloads & DEV_TX_OFFLOAD_SECURITY) {
+ if (dev_conf->txmode.offloads & RTE_ETH_TX_OFFLOAD_SECURITY) {
ic_session->op = IXGBE_OP_AUTHENTICATED_ENCRYPTION;
} else {
PMD_DRV_LOG(ERR, "IPsec encryption not enabled\n");
@@ -633,11 +633,11 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
tx_offloads = dev->data->dev_conf.txmode.offloads;
/* sanity checks */
- if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+ if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
PMD_DRV_LOG(ERR, "RSC and IPsec not supported");
return -1;
}
- if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+ if (rx_offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
PMD_DRV_LOG(ERR, "HW CRC strip needs to be enabled for IPsec");
return -1;
}
@@ -657,7 +657,7 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
reg |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP;
IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);
- if (rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+ if (rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, 0);
reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
if (reg != 0) {
@@ -665,7 +665,7 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
return -1;
}
}
- if (tx_offloads & DEV_TX_OFFLOAD_SECURITY) {
+ if (tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY) {
IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL,
IXGBE_SECTXCTRL_STORE_FORWARD);
reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
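On the application side, inline IPsec is requested with the renamed security offload bits, ideally gated on the advertised capabilities; a fragment (dev_info and conf are assumed to be in scope):

/* Sketch: opt into inline IPsec only if the PMD advertises it. */
if ((dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_SECURITY) &&
    (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_SECURITY)) {
	conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_SECURITY;
	conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_SECURITY;
}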
@@ -104,15 +104,15 @@ int ixgbe_pf_host_init(struct rte_eth_dev *eth_dev)
memset(uta_info, 0, sizeof(struct ixgbe_uta_info));
hw->mac.mc_filter_type = 0;
- if (vf_num >= ETH_32_POOLS) {
+ if (vf_num >= RTE_ETH_32_POOLS) {
nb_queue = 2;
- RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_64_POOLS;
- } else if (vf_num >= ETH_16_POOLS) {
+ RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_64_POOLS;
+ } else if (vf_num >= RTE_ETH_16_POOLS) {
nb_queue = 4;
- RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_32_POOLS;
+ RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_32_POOLS;
} else {
nb_queue = 8;
- RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_16_POOLS;
+ RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_16_POOLS;
}
RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = nb_queue;
@@ -263,15 +263,15 @@