@@ -199,6 +199,9 @@ rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
char *cls_str = NULL;
int str_size;
+ if (iter == NULL || devargs_str == NULL)
+ return -EINVAL;
+
memset(iter, 0, sizeof(*iter));
/*
@@ -293,7 +296,7 @@ rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
uint16_t
rte_eth_iterator_next(struct rte_dev_iterator *iter)
{
- if (iter->cls == NULL) /* invalid ethdev iterator */
+ if (iter == NULL || iter->cls == NULL) /* invalid ethdev iterator */
return RTE_MAX_ETHPORTS;
do { /* loop to try all matching rte_device */
@@ -322,7 +325,7 @@ rte_eth_iterator_next(struct rte_dev_iterator *iter)
void
rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
{
- if (iter->bus_str == NULL)
+ if (iter == NULL || iter->bus_str == NULL)
return; /* nothing to free in pure class filter */
free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */
free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */
@@ -428,6 +431,9 @@ rte_eth_dev_allocated(const char *name)
{
struct rte_eth_dev *ethdev;
+ if (name == NULL)
+ return NULL;
+
eth_dev_shared_data_prepare();
rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
@@ -472,6 +478,9 @@ rte_eth_dev_allocate(const char *name)
struct rte_eth_dev *eth_dev = NULL;
size_t name_len;
+ if (name == NULL)
+ return NULL;
+
name_len = strnlen(name, RTE_ETH_NAME_MAX_LEN);
if (name_len == 0) {
RTE_ETHDEV_LOG(ERR, "Zero length Ethernet device name\n");
@@ -525,6 +534,9 @@ rte_eth_dev_attach_secondary(const char *name)
uint16_t i;
struct rte_eth_dev *eth_dev = NULL;
+ if (name == NULL)
+ return NULL;
+
eth_dev_shared_data_prepare();
/* Synchronize port attachment to primary port creation and release. */
@@ -622,6 +634,9 @@ rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
int
rte_eth_dev_owner_new(uint64_t *owner_id)
{
+ if (owner_id == NULL)
+ return -EINVAL;
+
eth_dev_shared_data_prepare();
rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
@@ -678,6 +693,9 @@ rte_eth_dev_owner_set(const uint16_t port_id,
{
int ret;
+ if (owner == NULL)
+ return -EINVAL;
+
eth_dev_shared_data_prepare();
rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
@@ -741,6 +759,9 @@ rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
int ret = 0;
struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
+ if (owner == NULL)
+ return -EINVAL;
+
eth_dev_shared_data_prepare();
rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
@@ -820,7 +841,7 @@ rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
{
uint16_t pid;
- if (name == NULL) {
+ if (name == NULL || port_id == NULL) {
RTE_ETHDEV_LOG(ERR, "Null pointer is specified\n");
return -EINVAL;
}
@@ -1297,6 +1318,9 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
int ret;
uint16_t old_mtu;
+ if (dev_conf == NULL)
+ return -EINVAL;
+
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
@@ -1576,6 +1600,9 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
void
rte_eth_dev_internal_reset(struct rte_eth_dev *dev)
{
+ if (dev == NULL)
+ return;
+
if (dev->data->dev_started) {
RTE_ETHDEV_LOG(ERR, "Port %u must be stopped to allow reset\n",
dev->data->port_id);
@@ -2137,6 +2164,9 @@ rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
int i;
int count;
+ if (conf == NULL)
+ return -EINVAL;
+
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
@@ -2309,6 +2339,9 @@ rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
int count;
int ret;
+ if (conf == NULL)
+ return -EINVAL;
+
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
if (tx_queue_id >= dev->data->nb_tx_queues) {
@@ -2459,6 +2492,9 @@ int
rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
buffer_tx_error_fn cbfn, void *userdata)
{
+ if (buffer == NULL)
+ return -EINVAL;
+
buffer->error_callback = cbfn;
buffer->error_userdata = userdata;
return 0;
@@ -2491,6 +2527,12 @@ rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP);
+ if (queue_id >= dev->data->nb_tx_queues) {
+ RTE_ETHDEV_LOG(ERR, "Queue id should be < %u.\n",
+ dev->data->nb_tx_queues);
+ return -EINVAL;
+ }
+
/* Call driver to free pending mbufs. */
ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
free_cnt);
@@ -2606,6 +2648,9 @@ rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
{
struct rte_eth_dev *dev;
+ if (eth_link == NULL)
+ return -EINVAL;
+
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
@@ -2626,6 +2671,9 @@ rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
{
struct rte_eth_dev *dev;
+ if (eth_link == NULL)
+ return -EINVAL;
+
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
@@ -2667,6 +2715,9 @@ rte_eth_link_speed_to_str(uint32_t link_speed)
int
rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link)
{
+ if (str == NULL || eth_link == NULL)
+ return -EINVAL;
+
if (eth_link->link_status == ETH_LINK_DOWN)
return snprintf(str, len, "Link down");
else
@@ -2683,6 +2734,9 @@ rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
{
struct rte_eth_dev *dev;
+ if (stats == NULL)
+ return -EINVAL;
+
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
@@ -3257,6 +3311,9 @@ rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
{
struct rte_eth_dev *dev;
+ if (fw_version == NULL)
+ return -EINVAL;
+
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
@@ -3278,6 +3335,9 @@ rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
};
int diag;
+ if (dev_info == NULL)
+ return -EINVAL;
+
/*
* Init dev_info before port_id check since caller does not have
* return status and does not know if get is successful or not.
@@ -3325,6 +3385,9 @@ rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
struct rte_eth_dev *dev;
const uint32_t *all_ptypes;
+ if (ptypes == NULL)
+ return -EINVAL;
+
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
@@ -3434,6 +3497,9 @@ rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
{
struct rte_eth_dev *dev;
+ if (mac_addr == NULL)
+ return -EINVAL;
+
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
@@ -3446,6 +3512,9 @@ rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
{
struct rte_eth_dev *dev;
+ if (mtu == NULL)
+ return -EINVAL;
+
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
@@ -3695,6 +3764,9 @@ rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
{
struct rte_eth_dev *dev;
+ if (fc_conf == NULL)
+ return -EINVAL;
+
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
@@ -3707,6 +3779,9 @@ rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
{
struct rte_eth_dev *dev;
+ if (fc_conf == NULL)
+ return -EINVAL;
+
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n");
@@ -3724,6 +3799,9 @@ rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
{
struct rte_eth_dev *dev;
+ if (pfc_conf == NULL)
+ return -EINVAL;
+
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n");
@@ -3795,6 +3873,9 @@ rte_eth_dev_rss_reta_update(uint16_t port_id,
struct rte_eth_dev *dev;
int ret;
+ if (reta_conf == NULL)
+ return -EINVAL;
+
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
/* Check mask bits */
ret = eth_check_reta_mask(reta_conf, reta_size);
@@ -3822,6 +3903,9 @@ rte_eth_dev_rss_reta_query(uint16_t port_id,
struct rte_eth_dev *dev;
int ret;
+ if (reta_conf == NULL)
+ return -EINVAL;
+
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
/* Check mask bits */
@@ -3843,6 +3927,9 @@ rte_eth_dev_rss_hash_update(uint16_t port_id,
struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, };
int ret;
+ if (rss_conf == NULL)
+ return -EINVAL;
+
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
ret = rte_eth_dev_info_get(port_id, &dev_info);
@@ -3871,6 +3958,9 @@ rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
{
struct rte_eth_dev *dev;
+ if (rss_conf == NULL)
+ return -EINVAL;
+
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
@@ -4026,6 +4116,9 @@ rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr,
uint64_t pool_mask;
int ret;
+ if (addr == NULL)
+ return -EINVAL;
+
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
@@ -4076,6 +4169,9 @@ rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr)
struct rte_eth_dev *dev;
int index;
+ if (addr == NULL)
+ return -EINVAL;
+
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
@@ -4107,6 +4203,9 @@ rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr)
struct rte_eth_dev *dev;
int ret;
+ if (addr == NULL)
+ return -EINVAL;
+
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
if (!rte_is_valid_assigned_ether_addr(addr))
@@ -4162,6 +4261,9 @@ rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
int ret;
struct rte_eth_dev *dev;
+ if (addr == NULL)
+ return -EINVAL;
+
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
@@ -4264,6 +4366,9 @@ rte_eth_mirror_rule_set(uint16_t port_id,
{
struct rte_eth_dev *dev;
+ if (mirror_conf == NULL)
+ return -EINVAL;
+
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
if (mirror_conf->rule_type == 0) {
RTE_ETHDEV_LOG(ERR, "Mirror rule type can not be 0\n");
@@ -4447,6 +4552,9 @@ rte_eth_dev_callback_process(struct rte_eth_dev *dev,
struct rte_eth_dev_callback dev_cb;
int rc = 0;
+ if (dev == NULL)
+ return -EINVAL;
+
rte_spinlock_lock(&eth_dev_cb_lock);
TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
if (cb_lst->cb_fn == NULL || cb_lst->event != event)
@@ -4568,6 +4676,9 @@ rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
const struct rte_memzone *mz;
int rc;
+ if (dev == NULL || ring_name == NULL)
+ return NULL;
+
rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id,
queue_id, ring_name);
if (rc >= RTE_MEMZONE_NAMESIZE) {
@@ -4602,6 +4713,9 @@ rte_eth_dma_zone_free(const struct rte_eth_dev *dev, const char *ring_name,
const struct rte_memzone *mz;
int rc = 0;
+ if (dev == NULL || ring_name == NULL)
+ return -EINVAL;
+
rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id,
queue_id, ring_name);
if (rc >= RTE_MEMZONE_NAMESIZE) {
@@ -4628,6 +4742,9 @@ rte_eth_dev_create(struct rte_device *device, const char *name,
struct rte_eth_dev *ethdev;
int retval;
+ if (device == NULL)
+ return -EINVAL;
+
RTE_FUNC_PTR_OR_ERR_RET(*ethdev_init, -EINVAL);
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
@@ -5207,6 +5324,9 @@ rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
{
struct rte_eth_dev *dev;
+ if (timestamp == NULL)
+ return -EINVAL;
+
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
@@ -5221,6 +5341,9 @@ rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
{
struct rte_eth_dev *dev;
+ if (timestamp == NULL)
+ return -EINVAL;
+
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
@@ -5247,6 +5370,9 @@ rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
{
struct rte_eth_dev *dev;
+ if (timestamp == NULL)
+ return -EINVAL;
+
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
@@ -5260,6 +5386,9 @@ rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
{
struct rte_eth_dev *dev;
+ if (timestamp == NULL)
+ return -EINVAL;
+
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
@@ -5273,6 +5402,9 @@ rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
{
struct rte_eth_dev *dev;
+ if (clock == NULL)
+ return -EINVAL;
+
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
@@ -5370,6 +5502,9 @@ rte_eth_dev_get_dcb_info(uint16_t port_id,
{
struct rte_eth_dev *dev;
+ if (dcb_info == NULL)
+ return -EINVAL;
+
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
@@ -5421,6 +5556,9 @@ rte_eth_dev_hairpin_capability_get(uint16_t port_id,
{
struct rte_eth_dev *dev;
+ if (cap == NULL)
+ return -EINVAL;
+
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
@@ -5432,6 +5570,9 @@ rte_eth_dev_hairpin_capability_get(uint16_t port_id,
int
rte_eth_dev_is_rx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
{
+ if (dev == NULL || queue_id >= RTE_MAX_QUEUES_PER_PORT)
+ return -EINVAL;
+
if (dev->data->rx_queue_state[queue_id] ==
RTE_ETH_QUEUE_STATE_HAIRPIN)
return 1;
@@ -5441,6 +5582,9 @@ rte_eth_dev_is_rx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
int
rte_eth_dev_is_tx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
{
+ if (dev == NULL || queue_id >= RTE_MAX_QUEUES_PER_PORT)
+ return -EINVAL;
+
if (dev->data->tx_queue_state[queue_id] ==
RTE_ETH_QUEUE_STATE_HAIRPIN)
return 1;
@@ -5487,6 +5631,9 @@ rte_eth_switch_domain_alloc(uint16_t *domain_id)
{
uint16_t i;
+ if (domain_id == NULL)
+ return -EINVAL;
+
*domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
@@ -5590,6 +5737,9 @@ rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da)
unsigned int i;
int result = 0;
+ if (dargs == NULL || eth_da == NULL)
+ return -EINVAL;
+
memset(eth_da, 0, sizeof(*eth_da));
result = eth_dev_devargs_tokenise(&args, dargs);
@@ -5629,6 +5779,8 @@ rte_eth_representor_id_get(const struct rte_eth_dev *ethdev,
struct rte_eth_representor_info *info = NULL;
size_t size;
+ if (ethdev == NULL)
+ return -EINVAL;
if (type == RTE_ETH_REPRESENTOR_NONE)
return 0;
if (repr_id == NULL)