perf/x86: Remove task_ctx_size
author		Kan Liang <kan.liang@linux.intel.com>
		Fri, 3 Jul 2020 12:49:24 +0000 (05:49 -0700)
committer	Peter Zijlstra <peterz@infradead.org>
		Wed, 8 Jul 2020 09:38:55 +0000 (11:38 +0200)
A new kmem_cache method has replaced kzalloc() for allocating the PMU
specific data. The task_ctx_size field is no longer required.
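
For illustration, a minimal sketch of the driver-side contract after this
change, using a hypothetical PMU (my_pmu and struct my_task_ctx are made-up
names, not in-tree code): the driver registers a slab cache, and the perf
core allocates per-task data from it; a PMU without a cache simply gets no
per-task context instead of a kzalloc() fallback.

  /*
   * Hypothetical example: a PMU that needs per-task data now provides a
   * kmem_cache instead of setting task_ctx_size.
   */
  my_pmu.task_ctx_cache = kmem_cache_create("my_pmu_task_ctx",
					    sizeof(struct my_task_ctx),
					    0, 0, NULL);
  /*
   * The perf core then uses kmem_cache_zalloc(pmu->task_ctx_cache, ...)
   * in alloc_task_ctx_data(); PMUs with no cache get NULL back.
   */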

Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/1593780569-62993-19-git-send-email-kan.liang@linux.intel.com
arch/x86/events/core.c
arch/x86/events/intel/lbr.c
include/linux/perf_event.h
kernel/events/core.c

diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index d740c86..6b1228a 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2371,7 +2371,6 @@ static struct pmu pmu = {
 
        .event_idx              = x86_pmu_event_idx,
        .sched_task             = x86_pmu_sched_task,
-       .task_ctx_size          = sizeof(struct x86_perf_task_context),
        .swap_task_ctx          = x86_pmu_swap_task_ctx,
        .check_period           = x86_pmu_check_period,
 
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index e784c1d..3ad5289 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -1672,7 +1672,6 @@ void __init intel_pmu_arch_lbr_init(void)
 
        size = sizeof(struct x86_perf_task_context_arch_lbr) +
               lbr_nr * sizeof(struct lbr_entry);
-       x86_get_pmu()->task_ctx_size = size;
        x86_get_pmu()->task_ctx_cache = create_lbr_kmem_cache(size, 0);
 
        x86_pmu.lbr_from = MSR_ARCH_LBR_FROM_0;
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 09915ae..3b22db0 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -419,10 +419,6 @@ struct pmu {
         */
        void (*sched_task)              (struct perf_event_context *ctx,
                                        bool sched_in);
-       /*
-        * PMU specific data size
-        */
-       size_t                          task_ctx_size;
 
        /*
         * Kmem cache of PMU specific data
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 30d9b31..7c436d7 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1243,15 +1243,13 @@ static void *alloc_task_ctx_data(struct pmu *pmu)
        if (pmu->task_ctx_cache)
                return kmem_cache_zalloc(pmu->task_ctx_cache, GFP_KERNEL);
 
-       return kzalloc(pmu->task_ctx_size, GFP_KERNEL);
+       return NULL;
 }
 
 static void free_task_ctx_data(struct pmu *pmu, void *task_ctx_data)
 {
        if (pmu->task_ctx_cache && task_ctx_data)
                kmem_cache_free(pmu->task_ctx_cache, task_ctx_data);
-       else
-               kfree(task_ctx_data);
 }
 
 static void free_ctx(struct rcu_head *head)