Prev: [RFC][PATCH v2 0/11] perf: remove __weak function hw_perf_event_init
Next: [RFC][PATCH v2 10/11] perf: core, lookup pmu via sysfs
From: Lin Ming on 18 May 2010 13:50

Mostly copy hw_perf_event_init() to implement it. hw_perf_event_init() is
kept as a backup at the end of the file and will be removed later.

Changes log

v2: Backup hw_perf_event_init for smooth transition (Peter Zijlstra)
v1: x86, implement api pmu::init_event

Signed-off-by: Lin Ming <ming.m.lin@intel.com>
---
 arch/x86/kernel/cpu/perf_event.c |   65 +++++++++++++++++++++++++++++---------
 1 files changed, 50 insertions(+), 15 deletions(-)

diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 5c6a0d9..aad4221 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1362,6 +1362,9 @@ void __init init_hw_perf_events(void)
 	pr_info("... event mask:             %016Lx\n", x86_pmu.intel_ctrl);
 
 	perf_cpu_notifier(x86_pmu_notifier);
+
+	if (!perf_event_register_pmu(&pmu))
+		pr_info("Register pmu fails\n");
 }
 
 static inline void x86_pmu_read(struct perf_event *event)
@@ -1422,18 +1425,6 @@ static int x86_pmu_commit_txn(struct pmu *pmu)
 	return 0;
 }
 
-static struct pmu pmu = {
-	.enable		= x86_pmu_enable,
-	.disable	= x86_pmu_disable,
-	.start		= x86_pmu_start,
-	.stop		= x86_pmu_stop,
-	.read		= x86_pmu_read,
-	.unthrottle	= x86_pmu_unthrottle,
-	.start_txn	= x86_pmu_start_txn,
-	.cancel_txn	= x86_pmu_cancel_txn,
-	.commit_txn	= x86_pmu_commit_txn,
-};
-
 /*
  * validate that we can schedule this event
  */
@@ -1508,7 +1499,7 @@ out:
 	return ret;
 }
 
-struct pmu *hw_perf_event_init(struct perf_event *event)
+static int x86_pmu_init_event(struct perf_event *event)
 {
 	struct pmu *tmp;
 	int err;
@@ -1533,12 +1524,25 @@ struct pmu *hw_perf_event_init(struct perf_event *event)
 	if (err) {
 		if (event->destroy)
 			event->destroy(event);
-		return ERR_PTR(err);
 	}
 
-	return &pmu;
+	return err;
 }
 
+static struct pmu pmu = {
+	.id		= PMU_TYPE_CPU,
+	.enable		= x86_pmu_enable,
+	.disable	= x86_pmu_disable,
+	.start		= x86_pmu_start,
+	.stop		= x86_pmu_stop,
+	.read		= x86_pmu_read,
+	.unthrottle	= x86_pmu_unthrottle,
+	.start_txn	= x86_pmu_start_txn,
+	.cancel_txn	= x86_pmu_cancel_txn,
+	.commit_txn	= x86_pmu_commit_txn,
+	.init_event	= x86_pmu_init_event,
+};
+
 /*
  * callchain support
  */
@@ -1753,3 +1757,34 @@ unsigned long perf_misc_flags(struct pt_regs *regs)
 
 	return misc;
 }
+
+struct pmu *hw_perf_event_init(struct perf_event *event)
+{
+	struct pmu *tmp;
+	int err;
+
+	err = __hw_perf_event_init(event);
+	if (!err) {
+		/*
+		 * we temporarily connect event to its pmu
+		 * such that validate_group() can classify
+		 * it as an x86 event using is_x86_event()
+		 */
+		tmp = event->pmu;
+		event->pmu = &pmu;
+
+		if (event->group_leader != event)
+			err = validate_group(event);
+		else
+			err = validate_event(event);
+
+		event->pmu = tmp;
+	}
+	if (err) {
+		if (event->destroy)
+			event->destroy(event);
+		return ERR_PTR(err);
+	}
+
+	return &pmu;
+}
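For readers following the series, the sketch below (plain userspace C, not kernel code) models the dispatch this patch enables: the x86 pmu registers itself, and the core later calls its ->init_event() hook instead of relying on the __weak hw_perf_event_init(). The registry and perf_lookup_pmu() are hypothetical stand-ins for what the core actually does (the next patch looks up the pmu via sysfs); only pmu.id/PMU_TYPE_CPU, ->init_event, perf_event_register_pmu() and x86_pmu_init_event() mirror names from this patch.

/*
 * Standalone sketch, compilable with any C compiler.
 * perf_lookup_pmu() and the linked-list registry are illustrative only.
 */
#include <stdio.h>
#include <stddef.h>

#define PMU_TYPE_CPU	0

struct perf_event;			/* opaque for this sketch */

struct pmu {
	int id;
	int (*init_event)(struct perf_event *event);
	struct pmu *next;		/* registry link, illustrative only */
};

static struct pmu *pmu_registry;

/* Returns non-zero on success, matching the "if (!...) pr_info(fail)" check */
static int perf_event_register_pmu(struct pmu *pmu)
{
	pmu->next = pmu_registry;
	pmu_registry = pmu;
	return 1;
}

/* Hypothetical core-side lookup; the real series resolves this via sysfs */
static struct pmu *perf_lookup_pmu(int id)
{
	struct pmu *p;

	for (p = pmu_registry; p; p = p->next)
		if (p->id == id)
			return p;
	return NULL;
}

/* Arch hook: in the real patch this validates and sets up the hw event */
static int x86_pmu_init_event(struct perf_event *event)
{
	(void)event;
	printf("x86 pmu: event initialized\n");
	return 0;
}

static struct pmu pmu = {
	.id		= PMU_TYPE_CPU,
	.init_event	= x86_pmu_init_event,
};

int main(void)
{
	struct pmu *p;

	if (!perf_event_register_pmu(&pmu))
		printf("Register pmu fails\n");

	/* Core side: resolve the pmu by id and let it initialize the event */
	p = perf_lookup_pmu(PMU_TYPE_CPU);
	if (p && p->init_event)
		return p->init_event(NULL);

	return 0;
}

The point of the indirection is that the core no longer needs a per-arch __weak hook; each registered pmu carries its own init_event callback, which is what lets the series remove hw_perf_event_init once all users are converted.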