continue;
if (WARN_ON(!qh->notifier_fn))
continue;
- atomic_inc(&qh->stats.notifies);
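+ /* bump this CPU's counter; no shared atomic op on the notify path */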
+ this_cpu_inc(qh->stats->notifies);
qh->notifier_fn(qh->notifier_fn_arg);
}
rcu_read_unlock();
if (!qh)
return ERR_PTR(-ENOMEM);
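+ /* per-CPU stats: each CPU gets its own counters, summed at read time */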
+ qh->stats = alloc_percpu(struct knav_queue_stats);
+ if (!qh->stats) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
qh->flags = flags;
qh->inst = inst;
id = inst->id - inst->qmgr->start_queue;
if (range->ops && range->ops->open_queue)
ret = range->ops->open_queue(range, inst, flags);
- if (ret) {
- devm_kfree(inst->kdev->dev, qh);
- return ERR_PTR(ret);
- }
+ if (ret)
+ goto err;
}
list_add_tail_rcu(&qh->list, &inst->handles);
return qh;
+
+err:
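+ /* free per-CPU stats if they were allocated, then release the handle */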
+ if (qh->stats)
+ free_percpu(qh->stats);
+ devm_kfree(inst->kdev->dev, qh);
+ return ERR_PTR(ret);
}
static struct knav_queue *

static void knav_queue_debug_show_instance(struct seq_file *s, struct knav_queue_inst *inst)
{
struct knav_device *kdev = inst->kdev;
struct knav_queue *qh;
+ int cpu = 0;
+ int pushes = 0;
+ int pops = 0;
+ int push_errors = 0;
+ int pop_errors = 0;
+ int notifies = 0;
if (!knav_queue_is_busy(inst))
return;
seq_printf(s, "\tqueue id %d (%s)\n",
kdev->base_id + inst->id, inst->name);
for_each_handle_rcu(qh, inst) {
- seq_printf(s, "\t\thandle %p: ", qh);
- seq_printf(s, "pushes %8d, ",
- atomic_read(&qh->stats.pushes));
- seq_printf(s, "pops %8d, ",
- atomic_read(&qh->stats.pops));
- seq_printf(s, "count %8d, ",
- knav_queue_get_count(qh));
- seq_printf(s, "notifies %8d, ",
- atomic_read(&qh->stats.notifies));
- seq_printf(s, "push errors %8d, ",
- atomic_read(&qh->stats.push_errors));
- seq_printf(s, "pop errors %8d\n",
- atomic_read(&qh->stats.pop_errors));
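+ /* fold this handle's per-CPU counters into one total per statistic */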
+ pushes = pops = push_errors = pop_errors = notifies = 0;
+ for_each_possible_cpu(cpu) {
+ pushes += per_cpu_ptr(qh->stats, cpu)->pushes;
+ pops += per_cpu_ptr(qh->stats, cpu)->pops;
+ push_errors += per_cpu_ptr(qh->stats, cpu)->push_errors;
+ pop_errors += per_cpu_ptr(qh->stats, cpu)->pop_errors;
+ notifies += per_cpu_ptr(qh->stats, cpu)->notifies;
+ }
+
+ seq_printf(s, "\t\thandle %p: pushes %8d, pops %8d, count %8d, notifies %8d, push errors %8d, pop errors %8d\n",
+ qh,
+ pushes,
+ pops,
+ knav_queue_get_count(qh),
+ notifies,
+ push_errors,
+ pop_errors);
}
}
if (range->ops && range->ops->close_queue)
range->ops->close_queue(range, inst);
}
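+ /* drop the per-CPU stats before the handle itself is freed */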
+ free_percpu(qh->stats);
devm_kfree(inst->kdev->dev, qh);
}
EXPORT_SYMBOL_GPL(knav_queue_close);
val = (u32)dma | ((size / 16) - 1);
writel_relaxed(val, &qh->reg_push[0].ptr_size_thresh);
- atomic_inc(&qh->stats.pushes);
+ this_cpu_inc(qh->stats->pushes);
return 0;
}
EXPORT_SYMBOL_GPL(knav_queue_push);
if (size)
*size = ((val & DESC_SIZE_MASK) + 1) * 16;
- atomic_inc(&qh->stats.pops);
+ this_cpu_inc(qh->stats->pops);
return dma;
}
EXPORT_SYMBOL_GPL(knav_queue_pop);