[RFC,3/3] eal: enhance lock annotations for rwlock

Message ID 20241202125316.3732529-4-david.marchand@redhat.com
State Superseded
Delegated to: Thomas Monjalon
Series Improve lock annotations

Checks

Context Check Description
ci/loongarch-compilation success Compilation OK
ci/loongarch-unit-testing success Unit Testing PASS
ci/Intel-compilation fail Compilation issues
ci/intel-Testing success Testing PASS
ci/github-robot: build success github build: passed
ci/iol-mellanox-Performance success Performance Testing PASS
ci/intel-Functional success Functional PASS
ci/iol-intel-Performance success Performance Testing PASS
ci/iol-marvell-Functional success Functional Testing PASS
ci/iol-broadcom-Performance success Performance Testing PASS
ci/iol-unit-amd64-testing success Testing PASS
ci/iol-intel-Functional success Functional Testing PASS
ci/iol-compile-amd64-testing warning Testing issues
ci/iol-sample-apps-testing success Testing PASS
ci/iol-unit-arm64-testing success Testing PASS
ci/iol-compile-arm64-testing fail Testing issues

Commit Message

David Marchand Dec. 2, 2024, 12:53 p.m. UTC
Convert the rwlock API and its in-tree users to the clang capability annotations.
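
As an aside (not part of this patch), a minimal sketch of how the converted
annotations are meant to be used on a hypothetical rwlock-protected
configuration; cfg_lock, cfg_update() and cfg_refresh() are illustrative
names only:

#include <rte_rwlock.h>

/* Illustrative example only, not part of the patch. */
static rte_rwlock_t cfg_lock = RTE_RWLOCK_INITIALIZER;

/* Callers must hold cfg_lock for writing. */
static void
cfg_update(void)
	__rte_requires_capability(&cfg_lock)
{
	/* ... modify the shared configuration ... */
}

/* Callers must hold cfg_lock at least for reading. */
static int
cfg_read(void)
	__rte_requires_shared_capability(&cfg_lock)
{
	/* ... read the shared configuration ... */
	return 0;
}

static void
cfg_refresh(void)
{
	/* rte_rwlock_write_lock() is annotated __rte_acquire_capability() */
	rte_rwlock_write_lock(&cfg_lock);
	cfg_update();
	/* rte_rwlock_write_unlock() is annotated __rte_release_capability() */
	rte_rwlock_write_unlock(&cfg_lock);
}

With -Wthread-safety, clang then flags calls to cfg_update() or cfg_read()
made without the corresponding lock held.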

Signed-off-by: David Marchand <david.marchand@redhat.com>
---
 lib/eal/common/eal_memalloc.h        |   2 +-
 lib/eal/common/eal_private.h         |   2 +-
 lib/eal/include/generic/rte_rwlock.h |  23 ++--
 lib/eal/include/rte_eal_memconfig.h  |  24 ++--
 lib/ethdev/ethdev_driver.c           |   4 +-
 lib/ethdev/ethdev_private.h          |   4 +-
 lib/ethdev/rte_ethdev.c              |   4 +-
 lib/hash/rte_cuckoo_hash.c           |   8 +-
 lib/vhost/iotlb.h                    |   8 +-
 lib/vhost/vhost.c                    |  10 +-
 lib/vhost/vhost.h                    |  24 ++--
 lib/vhost/vhost_crypto.c             |  14 +--
 lib/vhost/virtio_net.c               | 170 +++++++++++++--------------
 lib/vhost/virtio_net_ctrl.c          |   2 +-
 14 files changed, 150 insertions(+), 149 deletions(-)
  

Patch

diff --git a/lib/eal/common/eal_memalloc.h b/lib/eal/common/eal_memalloc.h
index 286ffb7633..0c267066d9 100644
--- a/lib/eal/common/eal_memalloc.h
+++ b/lib/eal/common/eal_memalloc.h
@@ -92,7 +92,7 @@  eal_memalloc_get_seg_fd_offset(int list_idx, int seg_idx, size_t *offset);
 
 int
 eal_memalloc_init(void)
-	__rte_shared_locks_required(rte_mcfg_mem_get_lock());
+	__rte_requires_shared_capability(rte_mcfg_mem_get_lock());
 
 int
 eal_memalloc_cleanup(void);
diff --git a/lib/eal/common/eal_private.h b/lib/eal/common/eal_private.h
index bb315dab04..89bc05ecc5 100644
--- a/lib/eal/common/eal_private.h
+++ b/lib/eal/common/eal_private.h
@@ -124,7 +124,7 @@  int rte_eal_memseg_init(void);
  *   0 on success, negative on error
  */
 int rte_eal_memory_init(void)
-	__rte_shared_locks_required(rte_mcfg_mem_get_lock());
+	__rte_requires_shared_capability(rte_mcfg_mem_get_lock());
 
 /**
  * Configure timers
diff --git a/lib/eal/include/generic/rte_rwlock.h b/lib/eal/include/generic/rte_rwlock.h
index ac0474466a..197b245d11 100644
--- a/lib/eal/include/generic/rte_rwlock.h
+++ b/lib/eal/include/generic/rte_rwlock.h
@@ -23,6 +23,7 @@ 
  */
 
 #include <errno.h>
+#include <stdbool.h>
 
 #include <rte_branch_prediction.h>
 #include <rte_common.h>
@@ -57,7 +58,7 @@  extern "C" {
 				/* Writer is waiting or has lock */
 #define RTE_RWLOCK_READ	 0x4	/* Reader increment */
 
-typedef struct __rte_lockable {
+typedef struct __rte_capability("rwlock") {
 	RTE_ATOMIC(int32_t) cnt;
 } rte_rwlock_t;
 
@@ -90,7 +91,7 @@  rte_rwlock_init(rte_rwlock_t *rwl)
  */
 static inline void
 rte_rwlock_read_lock(rte_rwlock_t *rwl)
-	__rte_shared_lock_function(rwl)
+	__rte_acquire_shared_capability(rwl)
 	__rte_no_thread_safety_analysis
 {
 	int32_t x;
@@ -127,7 +128,7 @@  rte_rwlock_read_lock(rte_rwlock_t *rwl)
  */
 static inline int
 rte_rwlock_read_trylock(rte_rwlock_t *rwl)
-	__rte_shared_trylock_function(0, rwl)
+	__rte_try_acquire_shared_capability(false, rwl)
 	__rte_no_thread_safety_analysis
 {
 	int32_t x;
@@ -160,7 +161,7 @@  rte_rwlock_read_trylock(rte_rwlock_t *rwl)
  */
 static inline void
 rte_rwlock_read_unlock(rte_rwlock_t *rwl)
-	__rte_unlock_function(rwl)
+	__rte_release_shared_capability(rwl)
 	__rte_no_thread_safety_analysis
 {
 	rte_atomic_fetch_sub_explicit(&rwl->cnt, RTE_RWLOCK_READ, rte_memory_order_release);
@@ -178,7 +179,7 @@  rte_rwlock_read_unlock(rte_rwlock_t *rwl)
  */
 static inline int
 rte_rwlock_write_trylock(rte_rwlock_t *rwl)
-	__rte_exclusive_trylock_function(0, rwl)
+	__rte_try_acquire_capability(false, rwl)
 	__rte_no_thread_safety_analysis
 {
 	int32_t x;
@@ -200,7 +201,7 @@  rte_rwlock_write_trylock(rte_rwlock_t *rwl)
  */
 static inline void
 rte_rwlock_write_lock(rte_rwlock_t *rwl)
-	__rte_exclusive_lock_function(rwl)
+	__rte_acquire_capability(rwl)
 	__rte_no_thread_safety_analysis
 {
 	int32_t x;
@@ -238,7 +239,7 @@  rte_rwlock_write_lock(rte_rwlock_t *rwl)
  */
 static inline void
 rte_rwlock_write_unlock(rte_rwlock_t *rwl)
-	__rte_unlock_function(rwl)
+	__rte_release_capability(rwl)
 	__rte_no_thread_safety_analysis
 {
 	rte_atomic_fetch_sub_explicit(&rwl->cnt, RTE_RWLOCK_WRITE, rte_memory_order_release);
@@ -276,7 +277,7 @@  rte_rwlock_write_is_locked(rte_rwlock_t *rwl)
  */
 static inline void
 rte_rwlock_read_lock_tm(rte_rwlock_t *rwl)
-	__rte_shared_lock_function(rwl);
+	__rte_acquire_shared_capability(rwl);
 
 /**
  * Commit hardware memory transaction or release the read lock if the lock is used as a fall-back
@@ -286,7 +287,7 @@  rte_rwlock_read_lock_tm(rte_rwlock_t *rwl)
  */
 static inline void
 rte_rwlock_read_unlock_tm(rte_rwlock_t *rwl)
-	__rte_unlock_function(rwl);
+	__rte_release_shared_capability(rwl);
 
 /**
  * Try to execute critical section in a hardware memory transaction, if it
@@ -303,7 +304,7 @@  rte_rwlock_read_unlock_tm(rte_rwlock_t *rwl)
  */
 static inline void
 rte_rwlock_write_lock_tm(rte_rwlock_t *rwl)
-	__rte_exclusive_lock_function(rwl);
+	__rte_acquire_capability(rwl);
 
 /**
  * Commit hardware memory transaction or release the write lock if the lock is used as a fall-back
@@ -313,7 +314,7 @@  rte_rwlock_write_lock_tm(rte_rwlock_t *rwl)
  */
 static inline void
 rte_rwlock_write_unlock_tm(rte_rwlock_t *rwl)
-	__rte_unlock_function(rwl);
+	__rte_release_capability(rwl);
 
 #ifdef __cplusplus
 }
diff --git a/lib/eal/include/rte_eal_memconfig.h b/lib/eal/include/rte_eal_memconfig.h
index 55d78de334..c3056021a0 100644
--- a/lib/eal/include/rte_eal_memconfig.h
+++ b/lib/eal/include/rte_eal_memconfig.h
@@ -48,84 +48,84 @@  rte_mcfg_ethdev_get_lock(void);
  */
 void
 rte_mcfg_mem_read_lock(void)
-	__rte_shared_lock_function(rte_mcfg_mem_get_lock());
+	__rte_acquire_shared_capability(rte_mcfg_mem_get_lock());
 
 /**
  * Unlock the internal EAL shared memory configuration for shared access.
  */
 void
 rte_mcfg_mem_read_unlock(void)
-	__rte_unlock_function(rte_mcfg_mem_get_lock());
+	__rte_release_shared_capability(rte_mcfg_mem_get_lock());
 
 /**
  * Lock the internal EAL shared memory configuration for exclusive access.
  */
 void
 rte_mcfg_mem_write_lock(void)
-	__rte_exclusive_lock_function(rte_mcfg_mem_get_lock());
+	__rte_acquire_capability(rte_mcfg_mem_get_lock());
 
 /**
  * Unlock the internal EAL shared memory configuration for exclusive access.
  */
 void
 rte_mcfg_mem_write_unlock(void)
-	__rte_unlock_function(rte_mcfg_mem_get_lock());
+	__rte_release_capability(rte_mcfg_mem_get_lock());
 
 /**
  * Lock the internal EAL TAILQ list for shared access.
  */
 void
 rte_mcfg_tailq_read_lock(void)
-	__rte_shared_lock_function(rte_mcfg_tailq_get_lock());
+	__rte_acquire_shared_capability(rte_mcfg_tailq_get_lock());
 
 /**
  * Unlock the internal EAL TAILQ list for shared access.
  */
 void
 rte_mcfg_tailq_read_unlock(void)
-	__rte_unlock_function(rte_mcfg_tailq_get_lock());
+	__rte_release_shared_capability(rte_mcfg_tailq_get_lock());
 
 /**
  * Lock the internal EAL TAILQ list for exclusive access.
  */
 void
 rte_mcfg_tailq_write_lock(void)
-	__rte_exclusive_lock_function(rte_mcfg_tailq_get_lock());
+	__rte_acquire_capability(rte_mcfg_tailq_get_lock());
 
 /**
  * Unlock the internal EAL TAILQ list for exclusive access.
  */
 void
 rte_mcfg_tailq_write_unlock(void)
-	__rte_unlock_function(rte_mcfg_tailq_get_lock());
+	__rte_release_capability(rte_mcfg_tailq_get_lock());
 
 /**
  * Lock the internal EAL Mempool list for shared access.
  */
 void
 rte_mcfg_mempool_read_lock(void)
-	__rte_shared_lock_function(rte_mcfg_mempool_get_lock());
+	__rte_acquire_shared_capability(rte_mcfg_mempool_get_lock());
 
 /**
  * Unlock the internal EAL Mempool list for shared access.
  */
 void
 rte_mcfg_mempool_read_unlock(void)
-	__rte_unlock_function(rte_mcfg_mempool_get_lock());
+	__rte_release_shared_capability(rte_mcfg_mempool_get_lock());
 
 /**
  * Lock the internal EAL Mempool list for exclusive access.
  */
 void
 rte_mcfg_mempool_write_lock(void)
-	__rte_exclusive_lock_function(rte_mcfg_mempool_get_lock());
+	__rte_acquire_capability(rte_mcfg_mempool_get_lock());
 
 /**
  * Unlock the internal EAL Mempool list for exclusive access.
  */
 void
 rte_mcfg_mempool_write_unlock(void)
-	__rte_unlock_function(rte_mcfg_mempool_get_lock());
+	__rte_release_capability(rte_mcfg_mempool_get_lock());
 
 /**
  * Lock the internal EAL Timer Library lock for exclusive access.
diff --git a/lib/ethdev/ethdev_driver.c b/lib/ethdev/ethdev_driver.c
index 9afef06431..13d328d97b 100644
--- a/lib/ethdev/ethdev_driver.c
+++ b/lib/ethdev/ethdev_driver.c
@@ -48,7 +48,7 @@  eth_dev_allocated(const char *name)
 
 static uint16_t
 eth_dev_find_free_port(void)
-	__rte_exclusive_locks_required(rte_mcfg_ethdev_get_lock())
+	__rte_requires_capability(rte_mcfg_ethdev_get_lock())
 {
 	uint16_t i;
 
@@ -65,7 +65,7 @@  eth_dev_find_free_port(void)
 
 static struct rte_eth_dev *
 eth_dev_get(uint16_t port_id)
-	__rte_exclusive_locks_required(rte_mcfg_ethdev_get_lock())
+	__rte_requires_capability(rte_mcfg_ethdev_get_lock())
 {
 	struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];
 
diff --git a/lib/ethdev/ethdev_private.h b/lib/ethdev/ethdev_private.h
index 0d36b9c30f..b07b1b4c42 100644
--- a/lib/ethdev/ethdev_private.h
+++ b/lib/ethdev/ethdev_private.h
@@ -70,9 +70,9 @@  void eth_dev_fp_ops_setup(struct rte_eth_fp_ops *fpo,
 
 
 void *eth_dev_shared_data_prepare(void)
-	__rte_exclusive_locks_required(rte_mcfg_ethdev_get_lock());
+	__rte_requires_capability(rte_mcfg_ethdev_get_lock());
 void eth_dev_shared_data_release(void)
-	__rte_exclusive_locks_required(rte_mcfg_ethdev_get_lock());
+	__rte_requires_capability(rte_mcfg_ethdev_get_lock());
 
 void eth_dev_rxq_release(struct rte_eth_dev *dev, uint16_t qid);
 void eth_dev_txq_release(struct rte_eth_dev *dev, uint16_t qid);
diff --git a/lib/ethdev/rte_ethdev.c b/lib/ethdev/rte_ethdev.c
index 6413c54e3b..355d5c2df1 100644
--- a/lib/ethdev/rte_ethdev.c
+++ b/lib/ethdev/rte_ethdev.c
@@ -420,7 +420,7 @@  rte_eth_dev_is_valid_port(uint16_t port_id)
 
 static int
 eth_is_valid_owner_id(uint64_t owner_id)
-	__rte_exclusive_locks_required(rte_mcfg_ethdev_get_lock())
+	__rte_requires_capability(rte_mcfg_ethdev_get_lock())
 {
 	if (owner_id == RTE_ETH_DEV_NO_OWNER ||
 	    eth_dev_shared_data->next_owner_id <= owner_id)
@@ -471,7 +471,7 @@  rte_eth_dev_owner_new(uint64_t *owner_id)
 static int
 eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
 		       const struct rte_eth_dev_owner *new_owner)
-	__rte_exclusive_locks_required(rte_mcfg_ethdev_get_lock())
+	__rte_requires_capability(rte_mcfg_ethdev_get_lock())
 {
 	struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
 	struct rte_eth_dev_owner *port_owner;
diff --git a/lib/hash/rte_cuckoo_hash.c b/lib/hash/rte_cuckoo_hash.c
index 9575e8aa0c..00cbdf89d6 100644
--- a/lib/hash/rte_cuckoo_hash.c
+++ b/lib/hash/rte_cuckoo_hash.c
@@ -620,7 +620,7 @@  rte_hash_count(const struct rte_hash *h)
 /* Read write locks implemented using rte_rwlock */
 static inline void
 __hash_rw_writer_lock(const struct rte_hash *h)
-	__rte_exclusive_lock_function(&h->readwrite_lock)
+	__rte_acquire_capability(&h->readwrite_lock)
 	__rte_no_thread_safety_analysis
 {
 	if (h->writer_takes_lock && h->hw_trans_mem_support)
@@ -631,7 +631,7 @@  __hash_rw_writer_lock(const struct rte_hash *h)
 
 static inline void
 __hash_rw_reader_lock(const struct rte_hash *h)
-	__rte_shared_lock_function(&h->readwrite_lock)
+	__rte_acquire_shared_capability(&h->readwrite_lock)
 	__rte_no_thread_safety_analysis
 {
 	if (h->readwrite_concur_support && h->hw_trans_mem_support)
@@ -642,7 +642,7 @@  __hash_rw_reader_lock(const struct rte_hash *h)
 
 static inline void
 __hash_rw_writer_unlock(const struct rte_hash *h)
-	__rte_unlock_function(&h->readwrite_lock)
+	__rte_release_capability(&h->readwrite_lock)
 	__rte_no_thread_safety_analysis
 {
 	if (h->writer_takes_lock && h->hw_trans_mem_support)
@@ -653,7 +653,7 @@  __hash_rw_writer_unlock(const struct rte_hash *h)
 
 static inline void
 __hash_rw_reader_unlock(const struct rte_hash *h)
-	__rte_unlock_function(&h->readwrite_lock)
+	__rte_release_shared_capability(&h->readwrite_lock)
 	__rte_no_thread_safety_analysis
 {
 	if (h->readwrite_concur_support && h->hw_trans_mem_support)
diff --git a/lib/vhost/iotlb.h b/lib/vhost/iotlb.h
index 81ca04df21..72232b0dcf 100644
--- a/lib/vhost/iotlb.h
+++ b/lib/vhost/iotlb.h
@@ -11,28 +11,28 @@ 
 
 static __rte_always_inline void
 vhost_user_iotlb_rd_lock(struct vhost_virtqueue *vq)
-	__rte_shared_lock_function(&vq->iotlb_lock)
+	__rte_acquire_shared_capability(&vq->iotlb_lock)
 {
 	rte_rwlock_read_lock(&vq->iotlb_lock);
 }
 
 static __rte_always_inline void
 vhost_user_iotlb_rd_unlock(struct vhost_virtqueue *vq)
-	__rte_unlock_function(&vq->iotlb_lock)
+	__rte_release_shared_capability(&vq->iotlb_lock)
 {
 	rte_rwlock_read_unlock(&vq->iotlb_lock);
 }
 
 static __rte_always_inline void
 vhost_user_iotlb_wr_lock(struct vhost_virtqueue *vq)
-	__rte_exclusive_lock_function(&vq->iotlb_lock)
+	__rte_acquire_capability(&vq->iotlb_lock)
 {
 	rte_rwlock_write_lock(&vq->iotlb_lock);
 }
 
 static __rte_always_inline void
 vhost_user_iotlb_wr_unlock(struct vhost_virtqueue *vq)
-	__rte_unlock_function(&vq->iotlb_lock)
+	__rte_release_capability(&vq->iotlb_lock)
 {
 	rte_rwlock_write_unlock(&vq->iotlb_lock);
 }
diff --git a/lib/vhost/vhost.c b/lib/vhost/vhost.c
index 5a50a06f8d..2f34d3500e 100644
--- a/lib/vhost/vhost.c
+++ b/lib/vhost/vhost.c
@@ -390,7 +390,7 @@  cleanup_device(struct virtio_net *dev, int destroy)
 
 static void
 vhost_free_async_mem(struct vhost_virtqueue *vq)
-	__rte_exclusive_locks_required(&vq->access_lock)
+	__rte_requires_capability(&vq->access_lock)
 {
 	if (!vq->async)
 		return;
@@ -439,7 +439,7 @@  free_device(struct virtio_net *dev)
 
 static __rte_always_inline int
 log_translate(struct virtio_net *dev, struct vhost_virtqueue *vq)
-	__rte_shared_locks_required(&vq->iotlb_lock)
+	__rte_requires_shared_capability(&vq->iotlb_lock)
 {
 	if (likely(!(vq->ring_addrs.flags & (1 << VHOST_VRING_F_LOG))))
 		return 0;
@@ -488,7 +488,7 @@  translate_log_addr(struct virtio_net *dev, struct vhost_virtqueue *vq,
 
 static int
 vring_translate_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
-	__rte_shared_locks_required(&vq->iotlb_lock)
+	__rte_requires_shared_capability(&vq->iotlb_lock)
 {
 	uint64_t req_size, size;
 
@@ -527,7 +527,7 @@  vring_translate_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
 
 static int
 vring_translate_packed(struct virtio_net *dev, struct vhost_virtqueue *vq)
-	__rte_shared_locks_required(&vq->iotlb_lock)
+	__rte_requires_shared_capability(&vq->iotlb_lock)
 {
 	uint64_t req_size, size;
 
@@ -1772,7 +1772,7 @@  rte_vhost_extern_callback_register(int vid,
 
 static __rte_always_inline int
 async_channel_register(struct virtio_net *dev, struct vhost_virtqueue *vq)
-	__rte_exclusive_locks_required(&vq->access_lock)
+	__rte_requires_capability(&vq->access_lock)
 {
 	struct vhost_async *async;
 	int node = vq->numa_node;
diff --git a/lib/vhost/vhost.h b/lib/vhost/vhost.h
index 1f4192f5d1..0a7815832f 100644
--- a/lib/vhost/vhost.h
+++ b/lib/vhost/vhost.h
@@ -576,7 +576,7 @@  vhost_virtqueue_reconnect_log_packed(struct vhost_virtqueue *vq)
 
 static inline void
 vq_assert_lock__(struct virtio_net *dev, struct vhost_virtqueue *vq, const char *func)
-	__rte_assert_exclusive_lock(&vq->access_lock)
+	__rte_assert_capability(&vq->access_lock)
 {
 	if (unlikely(!rte_rwlock_write_is_locked(&vq->access_lock)))
 		rte_panic("VHOST_CONFIG: (%s) %s() called without access lock taken.\n",
@@ -627,14 +627,14 @@  void __vhost_log_cache_write(struct virtio_net *dev,
 void __vhost_log_cache_write_iova(struct virtio_net *dev,
 		struct vhost_virtqueue *vq,
 		uint64_t iova, uint64_t len)
-	__rte_shared_locks_required(&vq->iotlb_lock);
+	__rte_requires_shared_capability(&vq->iotlb_lock);
 void __vhost_log_cache_sync(struct virtio_net *dev,
 		struct vhost_virtqueue *vq);
 
 void __vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len);
 void __vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			    uint64_t iova, uint64_t len)
-	__rte_shared_locks_required(&vq->iotlb_lock);
+	__rte_requires_shared_capability(&vq->iotlb_lock);
 
 static __rte_always_inline void
 vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len)
@@ -684,7 +684,7 @@  vhost_log_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq,
 static __rte_always_inline void
 vhost_log_cache_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			   uint64_t iova, uint64_t len)
-	__rte_shared_locks_required(&vq->iotlb_lock)
+	__rte_requires_shared_capability(&vq->iotlb_lock)
 {
 	if (likely(!(dev->features & (1ULL << VHOST_F_LOG_ALL))))
 		return;
@@ -698,7 +698,7 @@  vhost_log_cache_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
 static __rte_always_inline void
 vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			   uint64_t iova, uint64_t len)
-	__rte_shared_locks_required(&vq->iotlb_lock)
+	__rte_requires_shared_capability(&vq->iotlb_lock)
 {
 	if (likely(!(dev->features & (1ULL << VHOST_F_LOG_ALL))))
 		return;
@@ -904,24 +904,24 @@  void vhost_backend_cleanup(struct virtio_net *dev);
 
 uint64_t __vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			uint64_t iova, uint64_t *len, uint8_t perm)
-	__rte_shared_locks_required(&vq->iotlb_lock);
+	__rte_requires_shared_capability(&vq->iotlb_lock);
 void *vhost_alloc_copy_ind_table(struct virtio_net *dev,
 			struct vhost_virtqueue *vq,
 			uint64_t desc_addr, uint64_t desc_len)
-	__rte_shared_locks_required(&vq->iotlb_lock);
+	__rte_requires_shared_capability(&vq->iotlb_lock);
 int vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq)
-	__rte_exclusive_locks_required(&vq->access_lock)
-	__rte_shared_locks_required(&vq->iotlb_lock);
+	__rte_requires_capability(&vq->access_lock)
+	__rte_requires_shared_capability(&vq->iotlb_lock);
 uint64_t translate_log_addr(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		uint64_t log_addr)
-	__rte_shared_locks_required(&vq->iotlb_lock);
+	__rte_requires_shared_capability(&vq->iotlb_lock);
 void vring_invalidate(struct virtio_net *dev, struct vhost_virtqueue *vq)
-	__rte_exclusive_locks_required(&vq->access_lock);
+	__rte_requires_capability(&vq->access_lock);
 
 static __rte_always_inline uint64_t
 vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			uint64_t iova, uint64_t *len, uint8_t perm)
-	__rte_shared_locks_required(&vq->iotlb_lock)
+	__rte_requires_shared_capability(&vq->iotlb_lock)
 {
 	if (!(dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
 		return rte_vhost_va_from_guest_pa(dev->mem, iova, len);
diff --git a/lib/vhost/vhost_crypto.c b/lib/vhost/vhost_crypto.c
index 7caf6d9afa..3dc41a3bd5 100644
--- a/lib/vhost/vhost_crypto.c
+++ b/lib/vhost/vhost_crypto.c
@@ -491,7 +491,7 @@  static __rte_always_inline struct virtio_crypto_inhdr *
 reach_inhdr(struct vhost_crypto_data_req *vc_req,
 		struct vhost_crypto_desc *head,
 		uint32_t max_n_descs)
-	__rte_shared_locks_required(&vc_req->vq->iotlb_lock)
+	__rte_requires_shared_capability(&vc_req->vq->iotlb_lock)
 {
 	struct virtio_crypto_inhdr *inhdr;
 	struct vhost_crypto_desc *last = head + (max_n_descs - 1);
@@ -538,7 +538,7 @@  static __rte_always_inline void *
 get_data_ptr(struct vhost_crypto_data_req *vc_req,
 		struct vhost_crypto_desc *cur_desc,
 		uint8_t perm)
-	__rte_shared_locks_required(&vc_req->vq->iotlb_lock)
+	__rte_requires_shared_capability(&vc_req->vq->iotlb_lock)
 {
 	void *data;
 	uint64_t dlen = cur_desc->len;
@@ -555,7 +555,7 @@  get_data_ptr(struct vhost_crypto_data_req *vc_req,
 static __rte_always_inline uint32_t
 copy_data_from_desc(void *dst, struct vhost_crypto_data_req *vc_req,
 	struct vhost_crypto_desc *desc, uint32_t size)
-	__rte_shared_locks_required(&vc_req->vq->iotlb_lock)
+	__rte_requires_shared_capability(&vc_req->vq->iotlb_lock)
 {
 	uint64_t remain;
 	uint64_t addr;
@@ -586,7 +586,7 @@  static __rte_always_inline int
 copy_data(void *data, struct vhost_crypto_data_req *vc_req,
 	struct vhost_crypto_desc *head, struct vhost_crypto_desc **cur_desc,
 	uint32_t size, uint32_t max_n_descs)
-	__rte_shared_locks_required(&vc_req->vq->iotlb_lock)
+	__rte_requires_shared_capability(&vc_req->vq->iotlb_lock)
 {
 	struct vhost_crypto_desc *desc = *cur_desc;
 	uint32_t left = size;
@@ -670,7 +670,7 @@  prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
 		uint32_t offset,
 		uint64_t write_back_len,
 		uint32_t max_n_descs)
-	__rte_shared_locks_required(&vc_req->vq->iotlb_lock)
+	__rte_requires_shared_capability(&vc_req->vq->iotlb_lock)
 {
 	struct vhost_crypto_writeback_data *wb_data, *head;
 	struct vhost_crypto_desc *desc = *cur_desc;
@@ -791,7 +791,7 @@  prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 		struct virtio_crypto_cipher_data_req *cipher,
 		struct vhost_crypto_desc *head,
 		uint32_t max_n_descs)
-	__rte_shared_locks_required(&vc_req->vq->iotlb_lock)
+	__rte_requires_shared_capability(&vc_req->vq->iotlb_lock)
 {
 	struct vhost_crypto_desc *desc = head;
 	struct vhost_crypto_writeback_data *ewb = NULL;
@@ -945,7 +945,7 @@  prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 		struct virtio_crypto_alg_chain_data_req *chain,
 		struct vhost_crypto_desc *head,
 		uint32_t max_n_descs)
-	__rte_shared_locks_required(&vc_req->vq->iotlb_lock)
+	__rte_requires_shared_capability(&vc_req->vq->iotlb_lock)
 {
 	struct vhost_crypto_desc *desc = head, *digest_desc;
 	struct vhost_crypto_writeback_data *ewb = NULL, *ewb2 = NULL;
diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index d764d4bc6a..00be575e07 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -55,7 +55,7 @@  is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t nr_vring)
 static inline void
 vhost_queue_stats_update(const struct virtio_net *dev, struct vhost_virtqueue *vq,
 		struct rte_mbuf **pkts, uint16_t count)
-	__rte_shared_locks_required(&vq->access_lock)
+	__rte_requires_shared_capability(&vq->access_lock)
 {
 	struct virtqueue_stats *stats = &vq->stats;
 	int i;
@@ -90,7 +90,7 @@  static __rte_always_inline int64_t
 vhost_async_dma_transfer_one(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		int16_t dma_id, uint16_t vchan_id, uint16_t flag_idx,
 		struct vhost_iov_iter *pkt)
-	__rte_shared_locks_required(&vq->access_lock)
+	__rte_requires_shared_capability(&vq->access_lock)
 {
 	struct async_dma_vchan_info *dma_info = &dma_copy_track[dma_id].vchans[vchan_id];
 	uint16_t ring_mask = dma_info->ring_mask;
@@ -140,7 +140,7 @@  static __rte_always_inline uint16_t
 vhost_async_dma_transfer(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		int16_t dma_id, uint16_t vchan_id, uint16_t head_idx,
 		struct vhost_iov_iter *pkts, uint16_t nr_pkts)
-	__rte_shared_locks_required(&vq->access_lock)
+	__rte_requires_shared_capability(&vq->access_lock)
 {
 	struct async_dma_vchan_info *dma_info = &dma_copy_track[dma_id].vchans[vchan_id];
 	int64_t ret, nr_copies = 0;
@@ -221,7 +221,7 @@  vhost_async_dma_check_completed(struct virtio_net *dev, int16_t dma_id, uint16_t
 
 static inline void
 do_data_copy_enqueue(struct virtio_net *dev, struct vhost_virtqueue *vq)
-	__rte_shared_locks_required(&vq->iotlb_lock)
+	__rte_requires_shared_capability(&vq->iotlb_lock)
 {
 	struct batch_copy_elem *elem = vq->batch_copy_elems;
 	uint16_t count = vq->batch_copy_nb_elems;
@@ -426,7 +426,7 @@  static __rte_always_inline void
 vhost_async_shadow_enqueue_packed_batch(struct vhost_virtqueue *vq,
 				 uint64_t *lens,
 				 uint16_t *ids)
-	__rte_exclusive_locks_required(&vq->access_lock)
+	__rte_requires_capability(&vq->access_lock)
 {
 	uint16_t i;
 	struct vhost_async *async = vq->async;
@@ -443,7 +443,7 @@  vhost_async_shadow_enqueue_packed_batch(struct vhost_virtqueue *vq,
 
 static __rte_always_inline void
 vhost_async_shadow_dequeue_packed_batch(struct vhost_virtqueue *vq, uint16_t *ids)
-	__rte_shared_locks_required(&vq->access_lock)
+	__rte_requires_shared_capability(&vq->access_lock)
 {
 	uint16_t i;
 	struct vhost_async *async = vq->async;
@@ -605,7 +605,7 @@  vhost_async_shadow_enqueue_packed(struct vhost_virtqueue *vq,
 				   uint16_t *id,
 				   uint16_t *count,
 				   uint16_t num_buffers)
-	__rte_exclusive_locks_required(&vq->access_lock)
+	__rte_requires_capability(&vq->access_lock)
 {
 	uint16_t i;
 	struct vhost_async *async = vq->async;
@@ -627,7 +627,7 @@  vhost_shadow_enqueue_single_packed(struct virtio_net *dev,
 				   uint16_t *id,
 				   uint16_t *count,
 				   uint16_t num_buffers)
-	__rte_shared_locks_required(&vq->iotlb_lock)
+	__rte_requires_shared_capability(&vq->iotlb_lock)
 {
 	vhost_shadow_enqueue_packed(vq, len, id, count, num_buffers);
 
@@ -719,7 +719,7 @@  static __rte_always_inline int
 map_one_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		struct buf_vector *buf_vec, uint16_t *vec_idx,
 		uint64_t desc_iova, uint64_t desc_len, uint8_t perm)
-	__rte_shared_locks_required(&vq->iotlb_lock)
+	__rte_requires_shared_capability(&vq->iotlb_lock)
 {
 	uint16_t vec_id = *vec_idx;
 
@@ -757,7 +757,7 @@  fill_vec_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			 uint32_t avail_idx, uint16_t *vec_idx,
 			 struct buf_vector *buf_vec, uint16_t *desc_chain_head,
 			 uint32_t *desc_chain_len, uint8_t perm)
-	__rte_shared_locks_required(&vq->iotlb_lock)
+	__rte_requires_shared_capability(&vq->iotlb_lock)
 {
 	uint16_t idx = vq->avail->ring[avail_idx & (vq->size - 1)];
 	uint16_t vec_id = *vec_idx;
@@ -841,7 +841,7 @@  reserve_avail_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 				uint64_t size, struct buf_vector *buf_vec,
 				uint16_t *num_buffers, uint16_t avail_head,
 				uint16_t *nr_vec)
-	__rte_shared_locks_required(&vq->iotlb_lock)
+	__rte_requires_shared_capability(&vq->iotlb_lock)
 {
 	uint16_t cur_idx;
 	uint16_t vec_idx = 0;
@@ -892,7 +892,7 @@  fill_vec_buf_packed_indirect(struct virtio_net *dev,
 			struct vhost_virtqueue *vq,
 			struct vring_packed_desc *desc, uint16_t *vec_idx,
 			struct buf_vector *buf_vec, uint32_t *len, uint8_t perm)
-	__rte_shared_locks_required(&vq->iotlb_lock)
+	__rte_requires_shared_capability(&vq->iotlb_lock)
 {
 	uint16_t i;
 	uint32_t nr_descs;
@@ -951,7 +951,7 @@  fill_vec_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 				uint16_t avail_idx, uint16_t *desc_count,
 				struct buf_vector *buf_vec, uint16_t *vec_idx,
 				uint16_t *buf_id, uint32_t *len, uint8_t perm)
-	__rte_shared_locks_required(&vq->iotlb_lock)
+	__rte_requires_shared_capability(&vq->iotlb_lock)
 {
 	bool wrap_counter = vq->avail_wrap_counter;
 	struct vring_packed_desc *descs = vq->desc_packed;
@@ -1017,7 +1017,7 @@  static __rte_noinline void
 copy_vnet_hdr_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		struct buf_vector *buf_vec,
 		struct virtio_net_hdr_mrg_rxbuf *hdr)
-	__rte_shared_locks_required(&vq->iotlb_lock)
+	__rte_requires_shared_capability(&vq->iotlb_lock)
 {
 	uint64_t len;
 	uint64_t remain = dev->vhost_hlen;
@@ -1120,8 +1120,8 @@  static __rte_always_inline int
 async_fill_seg(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		struct rte_mbuf *m, uint32_t mbuf_offset,
 		uint64_t buf_iova, uint32_t cpy_len, bool to_desc)
-	__rte_shared_locks_required(&vq->access_lock)
-	__rte_shared_locks_required(&vq->iotlb_lock)
+	__rte_requires_shared_capability(&vq->access_lock)
+	__rte_requires_shared_capability(&vq->iotlb_lock)
 {
 	struct vhost_async *async = vq->async;
 	uint64_t mapped_len;
@@ -1162,7 +1162,7 @@  static __rte_always_inline void
 sync_fill_seg(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		struct rte_mbuf *m, uint32_t mbuf_offset,
 		uint64_t buf_addr, uint64_t buf_iova, uint32_t cpy_len, bool to_desc)
-	__rte_shared_locks_required(&vq->iotlb_lock)
+	__rte_requires_shared_capability(&vq->iotlb_lock)
 {
 	struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
 
@@ -1200,8 +1200,8 @@  static __rte_always_inline int
 mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		struct rte_mbuf *m, struct buf_vector *buf_vec,
 		uint16_t nr_vec, uint16_t num_buffers, bool is_async)
-	__rte_shared_locks_required(&vq->access_lock)
-	__rte_shared_locks_required(&vq->iotlb_lock)
+	__rte_requires_shared_capability(&vq->access_lock)
+	__rte_requires_shared_capability(&vq->iotlb_lock)
 {
 	uint32_t vec_idx = 0;
 	uint32_t mbuf_offset, mbuf_avail;
@@ -1330,8 +1330,8 @@  vhost_enqueue_single_packed(struct virtio_net *dev,
 			    struct rte_mbuf *pkt,
 			    struct buf_vector *buf_vec,
 			    uint16_t *nr_descs)
-	__rte_shared_locks_required(&vq->access_lock)
-	__rte_shared_locks_required(&vq->iotlb_lock)
+	__rte_requires_shared_capability(&vq->access_lock)
+	__rte_requires_shared_capability(&vq->iotlb_lock)
 {
 	uint16_t nr_vec = 0;
 	uint16_t avail_idx = vq->last_avail_idx;
@@ -1392,8 +1392,8 @@  vhost_enqueue_single_packed(struct virtio_net *dev,
 static __rte_noinline uint32_t
 virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	struct rte_mbuf **pkts, uint32_t count)
-	__rte_shared_locks_required(&vq->access_lock)
-	__rte_shared_locks_required(&vq->iotlb_lock)
+	__rte_requires_shared_capability(&vq->access_lock)
+	__rte_requires_shared_capability(&vq->iotlb_lock)
 {
 	uint32_t pkt_idx = 0;
 	uint16_t num_buffers;
@@ -1452,7 +1452,7 @@  virtio_dev_rx_sync_batch_check(struct virtio_net *dev,
 			   struct rte_mbuf **pkts,
 			   uint64_t *desc_addrs,
 			   uint64_t *lens)
-	__rte_shared_locks_required(&vq->iotlb_lock)
+	__rte_requires_shared_capability(&vq->iotlb_lock)
 {
 	bool wrap_counter = vq->avail_wrap_counter;
 	struct vring_packed_desc *descs = vq->desc_packed;
@@ -1556,7 +1556,7 @@  virtio_dev_rx_batch_packed_copy(struct virtio_net *dev,
 			   struct rte_mbuf **pkts,
 			   uint64_t *desc_addrs,
 			   uint64_t *lens)
-	__rte_shared_locks_required(&vq->iotlb_lock)
+	__rte_requires_shared_capability(&vq->iotlb_lock)
 {
 	uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
 	struct virtio_net_hdr_mrg_rxbuf *hdrs[PACKED_BATCH_SIZE];
@@ -1604,7 +1604,7 @@  static __rte_always_inline int
 virtio_dev_rx_sync_batch_packed(struct virtio_net *dev,
 			   struct vhost_virtqueue *vq,
 			   struct rte_mbuf **pkts)
-	__rte_shared_locks_required(&vq->iotlb_lock)
+	__rte_requires_shared_capability(&vq->iotlb_lock)
 {
 	uint64_t desc_addrs[PACKED_BATCH_SIZE];
 	uint64_t lens[PACKED_BATCH_SIZE];
@@ -1626,8 +1626,8 @@  static __rte_always_inline int16_t
 virtio_dev_rx_single_packed(struct virtio_net *dev,
 			    struct vhost_virtqueue *vq,
 			    struct rte_mbuf *pkt)
-	__rte_shared_locks_required(&vq->access_lock)
-	__rte_shared_locks_required(&vq->iotlb_lock)
+	__rte_requires_shared_capability(&vq->access_lock)
+	__rte_requires_shared_capability(&vq->iotlb_lock)
 {
 	struct buf_vector buf_vec[BUF_VECTOR_MAX];
 	uint16_t nr_descs = 0;
@@ -1652,8 +1652,8 @@  virtio_dev_rx_packed(struct virtio_net *dev,
 		     struct vhost_virtqueue *__rte_restrict vq,
 		     struct rte_mbuf **__rte_restrict pkts,
 		     uint32_t count)
-	__rte_shared_locks_required(&vq->access_lock)
-	__rte_shared_locks_required(&vq->iotlb_lock)
+	__rte_requires_shared_capability(&vq->access_lock)
+	__rte_requires_shared_capability(&vq->iotlb_lock)
 {
 	uint32_t pkt_idx = 0;
 
@@ -1767,7 +1767,7 @@  rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
 
 static __rte_always_inline uint16_t
 async_get_first_inflight_pkt_idx(struct vhost_virtqueue *vq)
-	__rte_shared_locks_required(&vq->access_lock)
+	__rte_requires_shared_capability(&vq->access_lock)
 {
 	struct vhost_async *async = vq->async;
 
@@ -1796,8 +1796,8 @@  store_dma_desc_info_split(struct vring_used_elem *s_ring, struct vring_used_elem
 static __rte_noinline uint32_t
 virtio_dev_rx_async_submit_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	struct rte_mbuf **pkts, uint32_t count, int16_t dma_id, uint16_t vchan_id)
-	__rte_exclusive_locks_required(&vq->access_lock)
-	__rte_shared_locks_required(&vq->iotlb_lock)
+	__rte_requires_capability(&vq->access_lock)
+	__rte_requires_shared_capability(&vq->iotlb_lock)
 {
 	struct buf_vector buf_vec[BUF_VECTOR_MAX];
 	uint32_t pkt_idx = 0;
@@ -1907,8 +1907,8 @@  vhost_enqueue_async_packed(struct virtio_net *dev,
 			    struct buf_vector *buf_vec,
 			    uint16_t *nr_descs,
 			    uint16_t *nr_buffers)
-	__rte_exclusive_locks_required(&vq->access_lock)
-	__rte_shared_locks_required(&vq->iotlb_lock)
+	__rte_requires_capability(&vq->access_lock)
+	__rte_requires_shared_capability(&vq->iotlb_lock)
 {
 	uint16_t nr_vec = 0;
 	uint16_t avail_idx = vq->last_avail_idx;
@@ -1967,8 +1967,8 @@  vhost_enqueue_async_packed(struct virtio_net *dev,
 static __rte_always_inline int16_t
 virtio_dev_rx_async_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			    struct rte_mbuf *pkt, uint16_t *nr_descs, uint16_t *nr_buffers)
-	__rte_exclusive_locks_required(&vq->access_lock)
-	__rte_shared_locks_required(&vq->iotlb_lock)
+	__rte_requires_capability(&vq->access_lock)
+	__rte_requires_shared_capability(&vq->iotlb_lock)
 {
 	struct buf_vector buf_vec[BUF_VECTOR_MAX];
 
@@ -1991,8 +1991,8 @@  virtio_dev_rx_async_packed_batch_enqueue(struct virtio_net *dev,
 			   struct rte_mbuf **pkts,
 			   uint64_t *desc_addrs,
 			   uint64_t *lens)
-	__rte_exclusive_locks_required(&vq->access_lock)
-	__rte_shared_locks_required(&vq->iotlb_lock)
+	__rte_requires_capability(&vq->access_lock)
+	__rte_requires_shared_capability(&vq->iotlb_lock)
 {
 	uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
 	struct virtio_net_hdr_mrg_rxbuf *hdrs[PACKED_BATCH_SIZE];
@@ -2053,8 +2053,8 @@  virtio_dev_rx_async_packed_batch(struct virtio_net *dev,
 			   struct vhost_virtqueue *vq,
 			   struct rte_mbuf **pkts,
 			   int16_t dma_id, uint16_t vchan_id)
-	__rte_exclusive_locks_required(&vq->access_lock)
-	__rte_shared_locks_required(&vq->iotlb_lock)
+	__rte_requires_capability(&vq->access_lock)
+	__rte_requires_shared_capability(&vq->iotlb_lock)
 {
 	uint64_t desc_addrs[PACKED_BATCH_SIZE];
 	uint64_t lens[PACKED_BATCH_SIZE];
@@ -2070,7 +2070,7 @@  virtio_dev_rx_async_packed_batch(struct virtio_net *dev,
 static __rte_always_inline void
 dma_error_handler_packed(struct vhost_virtqueue *vq, uint16_t slot_idx,
 			uint32_t nr_err, uint32_t *pkt_idx)
-	__rte_exclusive_locks_required(&vq->access_lock)
+	__rte_requires_capability(&vq->access_lock)
 {
 	uint16_t descs_err = 0;
 	uint16_t buffers_err = 0;
@@ -2102,8 +2102,8 @@  dma_error_handler_packed(struct vhost_virtqueue *vq, uint16_t slot_idx,
 static __rte_noinline uint32_t
 virtio_dev_rx_async_submit_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	struct rte_mbuf **pkts, uint32_t count, int16_t dma_id, uint16_t vchan_id)
-	__rte_exclusive_locks_required(&vq->access_lock)
-	__rte_shared_locks_required(&vq->iotlb_lock)
+	__rte_requires_capability(&vq->access_lock)
+	__rte_requires_shared_capability(&vq->iotlb_lock)
 {
 	uint32_t pkt_idx = 0;
 	uint16_t n_xfer;
@@ -2176,7 +2176,7 @@  virtio_dev_rx_async_submit_packed(struct virtio_net *dev, struct vhost_virtqueue
 
 static __rte_always_inline void
 write_back_completed_descs_split(struct vhost_virtqueue *vq, uint16_t n_descs)
-	__rte_shared_locks_required(&vq->access_lock)
+	__rte_requires_shared_capability(&vq->access_lock)
 {
 	struct vhost_async *async = vq->async;
 	uint16_t nr_left = n_descs;
@@ -2209,7 +2209,7 @@  write_back_completed_descs_split(struct vhost_virtqueue *vq, uint16_t n_descs)
 static __rte_always_inline void
 write_back_completed_descs_packed(struct vhost_virtqueue *vq,
 				uint16_t n_buffers)
-	__rte_shared_locks_required(&vq->access_lock)
+	__rte_requires_shared_capability(&vq->access_lock)
 {
 	struct vhost_async *async = vq->async;
 	uint16_t from = async->last_buffer_idx_packed;
@@ -2274,7 +2274,7 @@  write_back_completed_descs_packed(struct vhost_virtqueue *vq,
 static __rte_always_inline uint16_t
 vhost_poll_enqueue_completed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	struct rte_mbuf **pkts, uint16_t count, int16_t dma_id, uint16_t vchan_id)
-	__rte_shared_locks_required(&vq->access_lock)
+	__rte_requires_shared_capability(&vq->access_lock)
 {
 	struct vhost_async *async = vq->async;
 	struct async_inflight_info *pkts_info = async->pkts_info;
@@ -2884,8 +2884,8 @@  desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		  struct buf_vector *buf_vec, uint16_t nr_vec,
 		  struct rte_mbuf *m, struct rte_mempool *mbuf_pool,
 		  bool legacy_ol_flags, uint16_t slot_idx, bool is_async)
-	__rte_shared_locks_required(&vq->access_lock)
-	__rte_shared_locks_required(&vq->iotlb_lock)
+	__rte_requires_shared_capability(&vq->access_lock)
+	__rte_requires_shared_capability(&vq->iotlb_lock)
 {
 	uint32_t buf_avail, buf_offset, buf_len;
 	uint64_t buf_addr, buf_iova;
@@ -3092,8 +3092,8 @@  static uint16_t
 virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count,
 	bool legacy_ol_flags)
-	__rte_shared_locks_required(&vq->access_lock)
-	__rte_shared_locks_required(&vq->iotlb_lock)
+	__rte_requires_shared_capability(&vq->access_lock)
+	__rte_requires_shared_capability(&vq->iotlb_lock)
 {
 	uint16_t i;
 	uint16_t avail_entries;
@@ -3188,8 +3188,8 @@  static uint16_t
 virtio_dev_tx_split_legacy(struct virtio_net *dev,
 	struct vhost_virtqueue *vq, struct rte_mempool *mbuf_pool,
 	struct rte_mbuf **pkts, uint16_t count)
-	__rte_shared_locks_required(&vq->access_lock)
-	__rte_shared_locks_required(&vq->iotlb_lock)
+	__rte_requires_shared_capability(&vq->access_lock)
+	__rte_requires_shared_capability(&vq->iotlb_lock)
 {
 	return virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count, true);
 }
@@ -3199,8 +3199,8 @@  static uint16_t
 virtio_dev_tx_split_compliant(struct virtio_net *dev,
 	struct vhost_virtqueue *vq, struct rte_mempool *mbuf_pool,
 	struct rte_mbuf **pkts, uint16_t count)
-	__rte_shared_locks_required(&vq->access_lock)
-	__rte_shared_locks_required(&vq->iotlb_lock)
+	__rte_requires_shared_capability(&vq->access_lock)
+	__rte_requires_shared_capability(&vq->iotlb_lock)
 {
 	return virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count, false);
 }
@@ -3212,7 +3212,7 @@  vhost_reserve_avail_batch_packed(struct virtio_net *dev,
 				 uint16_t avail_idx,
 				 uintptr_t *desc_addrs,
 				 uint16_t *ids)
-	__rte_shared_locks_required(&vq->iotlb_lock)
+	__rte_requires_shared_capability(&vq->iotlb_lock)
 {
 	bool wrap = vq->avail_wrap_counter;
 	struct vring_packed_desc *descs = vq->desc_packed;
@@ -3356,7 +3356,7 @@  virtio_dev_tx_batch_packed(struct virtio_net *dev,
 			   struct vhost_virtqueue *vq,
 			   struct rte_mbuf **pkts,
 			   bool legacy_ol_flags)
-	__rte_shared_locks_required(&vq->iotlb_lock)
+	__rte_requires_shared_capability(&vq->iotlb_lock)
 {
 	uint16_t avail_idx = vq->last_avail_idx;
 	uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
@@ -3403,8 +3403,8 @@  vhost_dequeue_single_packed(struct virtio_net *dev,
 			    uint16_t *buf_id,
 			    uint16_t *desc_count,
 			    bool legacy_ol_flags)
-	__rte_shared_locks_required(&vq->access_lock)
-	__rte_shared_locks_required(&vq->iotlb_lock)
+	__rte_requires_shared_capability(&vq->access_lock)
+	__rte_requires_shared_capability(&vq->iotlb_lock)
 {
 	struct buf_vector buf_vec[BUF_VECTOR_MAX];
 	uint32_t buf_len;
@@ -3453,8 +3453,8 @@  virtio_dev_tx_single_packed(struct virtio_net *dev,
 			    struct rte_mempool *mbuf_pool,
 			    struct rte_mbuf *pkts,
 			    bool legacy_ol_flags)
-	__rte_shared_locks_required(&vq->access_lock)
-	__rte_shared_locks_required(&vq->iotlb_lock)
+	__rte_requires_shared_capability(&vq->access_lock)
+	__rte_requires_shared_capability(&vq->iotlb_lock)
 {
 
 	uint16_t buf_id, desc_count = 0;
@@ -3514,8 +3514,8 @@  virtio_dev_tx_packed(struct virtio_net *dev,
 		     struct rte_mbuf **__rte_restrict pkts,
 		     uint32_t count,
 		     bool legacy_ol_flags)
-	__rte_shared_locks_required(&vq->access_lock)
-	__rte_shared_locks_required(&vq->iotlb_lock)
+	__rte_requires_shared_capability(&vq->access_lock)
+	__rte_requires_shared_capability(&vq->iotlb_lock)
 {
 	uint32_t pkt_idx = 0;
 
@@ -3565,8 +3565,8 @@  static uint16_t
 virtio_dev_tx_packed_legacy(struct virtio_net *dev,
 	struct vhost_virtqueue *__rte_restrict vq, struct rte_mempool *mbuf_pool,
 	struct rte_mbuf **__rte_restrict pkts, uint32_t count)
-	__rte_shared_locks_required(&vq->access_lock)
-	__rte_shared_locks_required(&vq->iotlb_lock)
+	__rte_requires_shared_capability(&vq->access_lock)
+	__rte_requires_shared_capability(&vq->iotlb_lock)
 {
 	return virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts, count, true);
 }
@@ -3576,8 +3576,8 @@  static uint16_t
 virtio_dev_tx_packed_compliant(struct virtio_net *dev,
 	struct vhost_virtqueue *__rte_restrict vq, struct rte_mempool *mbuf_pool,
 	struct rte_mbuf **__rte_restrict pkts, uint32_t count)
-	__rte_shared_locks_required(&vq->access_lock)
-	__rte_shared_locks_required(&vq->iotlb_lock)
+	__rte_requires_shared_capability(&vq->access_lock)
+	__rte_requires_shared_capability(&vq->iotlb_lock)
 {
 	return virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts, count, false);
 }
@@ -3696,7 +3696,7 @@  static __rte_always_inline uint16_t
 async_poll_dequeue_completed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		struct rte_mbuf **pkts, uint16_t count, int16_t dma_id,
 		uint16_t vchan_id, bool legacy_ol_flags)
-	__rte_shared_locks_required(&vq->access_lock)
+	__rte_requires_shared_capability(&vq->access_lock)
 {
 	uint16_t start_idx, from, i;
 	uint16_t nr_cpl_pkts = 0;
@@ -3744,8 +3744,8 @@  static __rte_always_inline uint16_t
 virtio_dev_tx_async_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count,
 		int16_t dma_id, uint16_t vchan_id, bool legacy_ol_flags)
-	__rte_shared_locks_required(&vq->access_lock)
-	__rte_shared_locks_required(&vq->iotlb_lock)
+	__rte_requires_shared_capability(&vq->access_lock)
+	__rte_requires_shared_capability(&vq->iotlb_lock)
 {
 	static bool allocerr_warned;
 	bool dropped = false;
@@ -3897,8 +3897,8 @@  virtio_dev_tx_async_split_legacy(struct virtio_net *dev,
 		struct vhost_virtqueue *vq, struct rte_mempool *mbuf_pool,
 		struct rte_mbuf **pkts, uint16_t count,
 		int16_t dma_id, uint16_t vchan_id)
-	__rte_shared_locks_required(&vq->access_lock)
-	__rte_shared_locks_required(&vq->iotlb_lock)
+	__rte_requires_shared_capability(&vq->access_lock)
+	__rte_requires_shared_capability(&vq->iotlb_lock)
 {
 	return virtio_dev_tx_async_split(dev, vq, mbuf_pool,
 				pkts, count, dma_id, vchan_id, true);
@@ -3910,8 +3910,8 @@  virtio_dev_tx_async_split_compliant(struct virtio_net *dev,
 		struct vhost_virtqueue *vq, struct rte_mempool *mbuf_pool,
 		struct rte_mbuf **pkts, uint16_t count,
 		int16_t dma_id, uint16_t vchan_id)
-	__rte_shared_locks_required(&vq->access_lock)
-	__rte_shared_locks_required(&vq->iotlb_lock)
+	__rte_requires_shared_capability(&vq->access_lock)
+	__rte_requires_shared_capability(&vq->iotlb_lock)
 {
 	return virtio_dev_tx_async_split(dev, vq, mbuf_pool,
 				pkts, count, dma_id, vchan_id, false);
@@ -3920,7 +3920,7 @@  virtio_dev_tx_async_split_compliant(struct virtio_net *dev,
 static __rte_always_inline void
 vhost_async_shadow_dequeue_single_packed(struct vhost_virtqueue *vq,
 				uint16_t buf_id, uint16_t count)
-	__rte_shared_locks_required(&vq->access_lock)
+	__rte_requires_shared_capability(&vq->access_lock)
 {
 	struct vhost_async *async = vq->async;
 	uint16_t idx = async->buffer_idx_packed;
@@ -3942,8 +3942,8 @@  virtio_dev_tx_async_single_packed(struct virtio_net *dev,
 			struct rte_mbuf *pkts,
 			uint16_t slot_idx,
 			bool legacy_ol_flags)
-	__rte_shared_locks_required(&vq->access_lock)
-	__rte_shared_locks_required(&vq->iotlb_lock)
+	__rte_requires_shared_capability(&vq->access_lock)
+	__rte_requires_shared_capability(&vq->iotlb_lock)
 {
 	int err;
 	uint16_t buf_id, desc_count = 0;
@@ -3995,8 +3995,8 @@  virtio_dev_tx_async_packed_batch(struct virtio_net *dev,
 			   struct vhost_virtqueue *vq,
 			   struct rte_mbuf **pkts, uint16_t slot_idx,
 			   uint16_t dma_id, uint16_t vchan_id)
-	__rte_shared_locks_required(&vq->access_lock)
-	__rte_shared_locks_required(&vq->iotlb_lock)
+	__rte_requires_shared_capability(&vq->access_lock)
+	__rte_requires_shared_capability(&vq->iotlb_lock)
 {
 	uint16_t avail_idx = vq->last_avail_idx;
 	uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
@@ -4053,8 +4053,8 @@  static __rte_always_inline uint16_t
 virtio_dev_tx_async_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts,
 		uint16_t count, uint16_t dma_id, uint16_t vchan_id, bool legacy_ol_flags)
-	__rte_shared_locks_required(&vq->access_lock)
-	__rte_shared_locks_required(&vq->iotlb_lock)
+	__rte_requires_shared_capability(&vq->access_lock)
+	__rte_requires_shared_capability(&vq->iotlb_lock)
 {
 	uint32_t pkt_idx = 0;
 	uint16_t slot_idx = 0;
@@ -4167,8 +4167,8 @@  static uint16_t
 virtio_dev_tx_async_packed_legacy(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts,
 		uint16_t count, uint16_t dma_id, uint16_t vchan_id)
-	__rte_shared_locks_required(&vq->access_lock)
-	__rte_shared_locks_required(&vq->iotlb_lock)
+	__rte_requires_shared_capability(&vq->access_lock)
+	__rte_requires_shared_capability(&vq->iotlb_lock)
 {
 	return virtio_dev_tx_async_packed(dev, vq, mbuf_pool,
 				pkts, count, dma_id, vchan_id, true);
@@ -4179,8 +4179,8 @@  static uint16_t
 virtio_dev_tx_async_packed_compliant(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts,
 		uint16_t count, uint16_t dma_id, uint16_t vchan_id)
-	__rte_shared_locks_required(&vq->access_lock)
-	__rte_shared_locks_required(&vq->iotlb_lock)
+	__rte_requires_shared_capability(&vq->access_lock)
+	__rte_requires_shared_capability(&vq->iotlb_lock)
 {
 	return virtio_dev_tx_async_packed(dev, vq, mbuf_pool,
 				pkts, count, dma_id, vchan_id, false);
diff --git a/lib/vhost/virtio_net_ctrl.c b/lib/vhost/virtio_net_ctrl.c
index b8ee94018e..999e84db7c 100644
--- a/lib/vhost/virtio_net_ctrl.c
+++ b/lib/vhost/virtio_net_ctrl.c
@@ -26,7 +26,7 @@  struct virtio_net_ctrl_elem {
 static int
 virtio_net_ctrl_pop(struct virtio_net *dev, struct vhost_virtqueue *cvq,
 		struct virtio_net_ctrl_elem *ctrl_elem)
-	__rte_shared_locks_required(&cvq->iotlb_lock)
+	__rte_requires_shared_capability(&cvq->iotlb_lock)
 {
 	uint16_t avail_idx, desc_idx, n_descs = 0;
 	uint64_t desc_len, desc_addr, desc_iova, data_len = 0;