--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ ... @@ static struct pmu pmu = {
 	.event_idx		= x86_pmu_event_idx,
 	.sched_task		= x86_pmu_sched_task,
-	.task_ctx_size		= sizeof(struct x86_perf_task_context),
 	.swap_task_ctx		= x86_pmu_swap_task_ctx,
 	.check_period		= x86_pmu_check_period,
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ ... @@ void __init intel_pmu_arch_lbr_init(void)
 	size = sizeof(struct x86_perf_task_context_arch_lbr) +
 	       lbr_nr * sizeof(struct lbr_entry);
-	x86_get_pmu()->task_ctx_size = size;
 	x86_get_pmu()->task_ctx_cache = create_lbr_kmem_cache(size, 0);
 
 	x86_pmu.lbr_from = MSR_ARCH_LBR_FROM_0;
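With the generic task_ctx_size fallback going away (see the kernel/events/core.c hunk below), the slab cache becomes the only allocation path for PMU-specific task context. As a rough model of what create_lbr_kmem_cache() amounts to: the helper does exist in lbr.c, but the body below is an illustrative sketch built only on the standard kmem_cache_create() API, with a made-up cache name.

#include <linux/slab.h>

/* Illustrative sketch, not the kernel's helper body: create a slab
 * cache for per-task LBR context, sized from the runtime LBR depth. */
static struct kmem_cache *create_lbr_kmem_cache(size_t size, size_t align)
{
	/* "lbr_task_ctx" is a placeholder name for this sketch. */
	return kmem_cache_create("lbr_task_ctx", size, align, 0, NULL);
}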
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ ... @@ struct pmu {
 	 */
 	void (*sched_task)		(struct perf_event_context *ctx,
 					bool sched_in);
 
-	/*
-	 * PMU specific data size
-	 */
-	size_t				task_ctx_size;
 	/*
 	 * Kmem cache of PMU specific data
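With the task_ctx_size field removed, a PMU that needs per-task context data must populate task_ctx_cache before registering; nothing else advertises the size anymore. A minimal sketch of the resulting driver-side contract, assuming a hypothetical my_pmu driver with a hypothetical context struct:

#include <linux/init.h>
#include <linux/perf_event.h>
#include <linux/slab.h>

/* Hypothetical per-task state for this sketch. */
struct my_task_ctx {
	u64 saved_branch_state;
};

static struct pmu my_pmu;	/* callbacks elided for brevity */

static int __init my_pmu_init(void)
{
	/* The cache replaces the removed task_ctx_size: its object
	 * size is the only place the context size is stated now. */
	my_pmu.task_ctx_cache = kmem_cache_create("my_pmu_task_ctx",
					sizeof(struct my_task_ctx),
					0, 0, NULL);
	if (!my_pmu.task_ctx_cache)
		return -ENOMEM;

	return perf_pmu_register(&my_pmu, "my_pmu", -1);
}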
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ ... @@ static void *alloc_task_ctx_data(struct pmu *pmu)
 {
 	if (pmu->task_ctx_cache)
 		return kmem_cache_zalloc(pmu->task_ctx_cache, GFP_KERNEL);
 
-	return kzalloc(pmu->task_ctx_size, GFP_KERNEL);
+	return NULL;
 }
 
 static void free_task_ctx_data(struct pmu *pmu, void *task_ctx_data)
 {
 	if (pmu->task_ctx_cache && task_ctx_data)
 		kmem_cache_free(pmu->task_ctx_cache, task_ctx_data);
-	else
-		kfree(task_ctx_data);
 }
 
 static void free_ctx(struct rcu_head *head)
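After this change alloc_task_ctx_data() returns NULL both when the PMU never declared a cache and when the slab allocation fails. Callers such as find_get_context() only reach it for events with PERF_ATTACH_TASK_DATA set, whose PMU is expected to have set task_ctx_cache, so they can treat NULL as -ENOMEM. A simplified, hedged model of that caller pattern (the wrapper function name is invented for this sketch; the real logic lives inline in find_get_context()):

#include <linux/err.h>
#include <linux/perf_event.h>

/* Hedged model, not kernel code: how a caller consumes the helper. */
static void *get_task_ctx_or_err(struct perf_event *event, struct pmu *pmu)
{
	void *task_ctx_data = NULL;

	if (event->attach_state & PERF_ATTACH_TASK_DATA) {
		task_ctx_data = alloc_task_ctx_data(pmu);
		/* The PMU declared per-task data, so NULL here means the
		 * slab allocation failed, not "no data needed". */
		if (!task_ctx_data)
			return ERR_PTR(-ENOMEM);
	}
	return task_ctx_data;
}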