INIT_LIST_HEAD(&dp->reo_cmd_cache_flush_list);
spin_lock_init(&dp->reo_cmd_lock);
+ dp->reo_cmd_cache_flush_count = 0;
+
ret = ath11k_wbm_idle_ring_setup(ab, &n_link_desc);
if (ret) {
ath11k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
return ret;
}
+#define DP_REO_DESC_FREE_THRESHOLD 64
#define DP_REO_DESC_FREE_TIMEOUT_MS 1000
struct dp_reo_cache_flush_elem {
struct list_head list;
struct dp_rx_tid data;
unsigned long ts;
};

struct ath11k_dp {
struct ath11k_base *ab;
struct hal_wbm_idle_scatter_list scatter_list[DP_IDLE_SCATTER_BUFS_MAX];
struct list_head reo_cmd_list;
struct list_head reo_cmd_cache_flush_list;
- /* protects access to reo_cmd_list and reo_cmd_cache_flush_list */
+ u32 reo_cmd_cache_flush_count;
+ /*
+ * protects access to the fields below:
+ * - reo_cmd_list
+ * - reo_cmd_cache_flush_list
+ * - reo_cmd_cache_flush_count
+ */
spinlock_t reo_cmd_lock;
};
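
The new counter exists so the delete path can bound the flush list by size as well as by age: once more than DP_REO_DESC_FREE_THRESHOLD entries are queued, the oldest ones are reclaimed right away instead of waiting out the one-second timeout. Below is a minimal userspace sketch of that policy, assuming a hand-rolled singly-linked list; flush_elem, flush_list_evict and the constants are illustrative stand-ins, not the driver's API.

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define FREE_THRESHOLD   64
#define FREE_TIMEOUT_SEC 1

struct flush_elem {
	struct flush_elem *next;
	time_t ts;                 /* when the entry was queued */
};

struct flush_list {
	struct flush_elem *head;   /* oldest entry first */
	unsigned int count;        /* mirrors reo_cmd_cache_flush_count */
};

/* Free entries that have aged out, or any entry at all while the list
 * is still over the size threshold; oldest entries go first. */
static void flush_list_evict(struct flush_list *fl, time_t now)
{
	struct flush_elem **pp = &fl->head;

	while (*pp) {
		struct flush_elem *e = *pp;

		if (fl->count > FREE_THRESHOLD ||
		    difftime(now, e->ts) >= FREE_TIMEOUT_SEC) {
			*pp = e->next;     /* unlink */
			fl->count--;
			free(e);           /* the driver flushes the HW cache here */
		} else {
			pp = &e->next;
		}
	}
}

int main(void)
{
	struct flush_list fl = { NULL, 0 };
	struct flush_elem **tail = &fl.head;
	time_t now = time(NULL);

	/* Queue 100 fresh entries at the tail, oldest first. */
	for (int i = 0; i < 100; i++) {
		struct flush_elem *e = calloc(1, sizeof(*e));

		if (!e)
			return 1;
		e->ts = now;
		*tail = e;
		tail = &e->next;
		fl.count++;
	}

	/* Nothing has aged out, so only the size threshold fires:
	 * the 36 oldest entries are freed, leaving 64. */
	flush_list_evict(&fl, now);
	printf("entries left: %u\n", fl.count);
	return 0;
}

Because the walk re-checks the count on every iteration, eviction stops as soon as the list is back at the threshold, which is exactly how the driver loop in the next hunk behaves.
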
list_for_each_entry_safe(cmd_cache, tmp_cache,
&dp->reo_cmd_cache_flush_list, list) {
list_del(&cmd_cache->list);
+ dp->reo_cmd_cache_flush_count--;
dma_unmap_single(ab->dev, cmd_cache->data.paddr,
cmd_cache->data.size, DMA_BIDIRECTIONAL);
kfree(cmd_cache->data.vaddr);
spin_lock_bh(&dp->reo_cmd_lock);
list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list);
+ dp->reo_cmd_cache_flush_count++;
spin_unlock_bh(&dp->reo_cmd_lock);
/* Flush and invalidate aged REO desc from HW cache, or trim the list
* back to DP_REO_DESC_FREE_THRESHOLD entries when it grows too long */
spin_lock_bh(&dp->reo_cmd_lock);
list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list,
list) {
- if (time_after(jiffies, elem->ts +
+ if (dp->reo_cmd_cache_flush_count > DP_REO_DESC_FREE_THRESHOLD ||
+ time_after(jiffies, elem->ts +
msecs_to_jiffies(DP_REO_DESC_FREE_TIMEOUT_MS))) {
list_del(&elem->list);
+ dp->reo_cmd_cache_flush_count--;
spin_unlock_bh(&dp->reo_cmd_lock);
ath11k_dp_reo_cache_flush(ab, &elem->data);
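
One detail worth calling out in the hunk above: the entry is unlinked and the count decremented while reo_cmd_lock is held, but the lock is released around ath11k_dp_reo_cache_flush(), which is comparatively slow, and retaken before the walk continues. Continuing a list_for_each_entry_safe() walk across an unlock like this is only safe if nothing else can free the saved next entry in that window. Here is a toy pthreads sketch of the same unlink-then-drop-the-lock pattern; the names (node, slow_flush, evict_all) are made up for the sketch, and it restarts from the head instead of resuming the walk, which is the more generally safe form.

#include <pthread.h>
#include <stdlib.h>

struct node {
	struct node *next;
};

static struct node *head;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for ath11k_dp_reo_cache_flush(): must run unlocked. */
static void slow_flush(struct node *n)
{
	free(n);
}

static void evict_all(void)
{
	pthread_mutex_lock(&list_lock);
	while (head) {
		struct node *n = head;

		head = n->next;            /* unlink while holding the lock */

		/* The node is no longer reachable from the list, so it is
		 * safe to touch without the lock held. */
		pthread_mutex_unlock(&list_lock);
		slow_flush(n);             /* slow work, lock dropped */
		pthread_mutex_lock(&list_lock);
	}
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	/* Queue three nodes, then drain them. */
	for (int i = 0; i < 3; i++) {
		struct node *n = calloc(1, sizeof(*n));

		if (!n)
			return 1;
		n->next = head;
		head = n;
	}
	evict_all();
	return 0;
}

The driver keeps the list_for_each_entry_safe() cursor alive across the unlock instead; that holds up only because no other path frees entries on reo_cmd_cache_flush_list concurrently with this function.
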