cycle_t 298 arch/x86/kernel/hpet.c static cycle_t read_hpet(void)
cycle_t 300 arch/x86/kernel/hpet.c return (cycle_t)hpet_readl(HPET_COUNTER);
cycle_t 304 arch/x86/kernel/hpet.c static cycle_t __vsyscall_fn vread_hpet(void)
cycle_t 326 arch/x86/kernel/hpet.c cycle_t t1;
cycle_t 134 arch/x86/kernel/i8253.c static cycle_t pit_read(void)
cycle_t 192 arch/x86/kernel/i8253.c return (cycle_t)(jifs * LATCH) + count;
cycle_t 70 arch/x86/kernel/kvmclock.c static cycle_t kvm_clock_read(void)
cycle_t 73 arch/x86/kernel/kvmclock.c cycle_t ret;
cycle_t 100 arch/x86/kernel/pvclock.c cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
cycle_t 104 arch/x86/kernel/pvclock.c cycle_t ret, offset;
cycle_t 667 arch/x86/kernel/tsc.c static cycle_t read_tsc(void)
cycle_t 669 arch/x86/kernel/tsc.c cycle_t ret = (cycle_t)get_cycles();
cycle_t 676 arch/x86/kernel/tsc.c static cycle_t __vsyscall_fn vread_tsc(void)
cycle_t 678 arch/x86/kernel/tsc.c cycle_t ret = (cycle_t)vget_cycles();
cycle_t 139 arch/x86/kernel/vmiclock_32.c cycle_t now, cycles_per_hz;
cycle_t 178 arch/x86/kernel/vmiclock_32.c cycle_t now = vmi_timer_ops.get_cycle_counter(vmi_counter(VMI_ONESHOT));
cycle_t 211 arch/x86/kernel/vmiclock_32.c cycle_t cycles_per_msec;
cycle_t 284 arch/x86/kernel/vmiclock_32.c static cycle_t read_real_cycles(void)
cycle_t 301 arch/x86/kernel/vmiclock_32.c cycle_t cycles_per_msec;
cycle_t 119 arch/x86/kernel/vsyscall_64.c cycle_t now, base, mask, cycle_delta;
cycle_t 122 arch/x86/kernel/vsyscall_64.c cycle_t (*vread)(void);
cycle_t 618 arch/x86/lguest/boot.c static cycle_t lguest_clock_read(void)
cycle_t 167 arch/x86/xen/time.c cycle_t now;
cycle_t 214 arch/x86/xen/time.c cycle_t xen_clocksource_read(void)
cycle_t 217 arch/x86/xen/time.c cycle_t ret;
cycle_t 38 arch/x86/xen/xen-ops.h cycle_t xen_clocksource_read(void);
cycle_t 8 include/asm-x86/pvclock.h cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src);
cycle_t 17 include/asm-x86/vgtod.h cycle_t (*vread)(void);
cycle_t 18 include/asm-x86/vgtod.h cycle_t cycle_last;
cycle_t 19 include/asm-x86/vgtod.h cycle_t mask;
cycle_t 63 include/linux/clocksource.h cycle_t (*read)(void);
cycle_t 64 include/linux/clocksource.h cycle_t mask;
cycle_t 68 include/linux/clocksource.h cycle_t (*vread)(void);
cycle_t 78 include/linux/clocksource.h cycle_t cycle_interval;
cycle_t 85 include/linux/clocksource.h cycle_t cycle_last ____cacheline_aligned_in_smp;
cycle_t 92 include/linux/clocksource.h cycle_t wd_last;
cycle_t 108 include/linux/clocksource.h #define CLOCKSOURCE_MASK(bits) (cycle_t)((bits) < 64 ? ((1ULL<<(bits))-1) : -1)
cycle_t 167 include/linux/clocksource.h static inline cycle_t clocksource_read(struct clocksource *cs)
cycle_t 181 include/linux/clocksource.h static inline s64 cyc2ns(struct clocksource *cs, cycle_t cycles)
cycle_t 210 include/linux/clocksource.h c->cycle_interval = (cycle_t)tmp;
cycle_t 76 kernel/time/clocksource.c static cycle_t watchdog_last;
cycle_t 100 kernel/time/clocksource.c cycle_t csnow, wdnow;
cycle_t 53 kernel/time/jiffies.c static cycle_t jiffies_read(void)
cycle_t 55 kernel/time/jiffies.c return (cycle_t) jiffies;
cycle_t 69 kernel/time/timekeeping.c cycle_t cycle_now, cycle_delta;
cycle_t 173 kernel/time/timekeeping.c cycle_t now;
cycle_t 446 kernel/time/timekeeping.c cycle_t offset;
cycle_t 698 kernel/trace/ftrace.c static cycle_t ftrace_update_time;
cycle_t 705 kernel/trace/ftrace.c cycle_t start, stop;
cycle_t 37 kernel/trace/trace.c unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX;
cycle_t 54 kernel/trace/trace.c ns2usecs(cycle_t nsec)
cycle_t 61 kernel/trace/trace.c cycle_t ftrace_now(int cpu)
cycle_t 75 kernel/trace/trace.h cycle_t t;
cycle_t 113 kernel/trace/trace.h cycle_t preempt_timestamp;
cycle_t 130 kernel/trace/trace.h cycle_t time_start;
cycle_t 240 kernel/trace/trace.h extern cycle_t ftrace_now(int cpu);
cycle_t 315 kernel/trace/trace.h extern long ns2usecs(cycle_t nsec);
cycle_t 112 kernel/trace/trace_irqsoff.c static int report_latency(cycle_t delta)
cycle_t 131 kernel/trace/trace_irqsoff.c cycle_t T0, T1, delta;
cycle_t 102 kernel/trace/trace_sched_wakeup.c static int report_latency(cycle_t delta)
cycle_t 122 kernel/trace/trace_sched_wakeup.c cycle_t T0, T1, delta;
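The pattern that runs through these hits is the clocksource interface: each driver (hpet, i8253, tsc, kvmclock, xen, lguest, jiffies) exposes a read callback of type cycle_t (*read)(void), pairs it with a cycle_t mask built by CLOCKSOURCE_MASK(), and the timekeeping core converts the raw counter values to nanoseconds via cyc2ns(). Below is a minimal sketch of such a driver for this kernel vintage, modeled on the jiffies clocksource visible in the listing; the name, rating, shift value and the clocksource_register()/clocksource_hz2mult() calls are illustrative assumptions rather than entries from the listing.

    #include <linux/clocksource.h>
    #include <linux/jiffies.h>
    #include <linux/init.h>

    /* Read callback: returns the current raw counter value as cycle_t.
     * Here jiffies stands in for a real hardware counter. */
    static cycle_t example_read(void)
    {
            return (cycle_t)jiffies;
    }

    static struct clocksource example_clocksource = {
            .name   = "example",               /* illustrative name */
            .rating = 1,                        /* lowest quality, placeholder */
            .read   = example_read,
            .mask   = CLOCKSOURCE_MASK(32),     /* counter wraps at 32 bits */
            .shift  = 8,                        /* assumed shift for mult */
    };

    static int __init example_clocksource_init(void)
    {
            /* mult/shift let cyc2ns() scale raw cycles to nanoseconds;
             * the counter here ticks at HZ, hence clocksource_hz2mult(). */
            example_clocksource.mult =
                    clocksource_hz2mult(HZ, example_clocksource.shift);
            return clocksource_register(&example_clocksource);
    }

The vread hits (vread_tsc, vread_hpet, the vgtod.h fields) are the vsyscall-side counterparts of the same callback shape, used so gettimeofday can read the counter from userspace without entering the kernel.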