| 12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205120612071208120912101211121212131214121512161217121812191220122112221223122412251226122712281229123012311232123312341235123612371238123912401241124212431244124512461247124812491250125112521253125412551256125712581259126012611262126312641265126612671268126912701271127212731274127512761277127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565156615671568156915701571157215731574157515761577157815791580158115821583158415851586158715881589159015911592159315941595159615971598159916001601160216031604160516061607160816091610161116121613161416151616161716181619162016211622162316241625162616271628162916301631163216331634163516361637163816391640164116421643164416451646164716481649165016511652165316541655165616571658165916601661166216631664166516661667166816691670167116721673167416751676167716781679168016811682168316841685168616871688168916901691169216931694169516961697169816991700170117021703170417051706170717081709171017111712171317141715171617171718171917201721172217231724172517261727172817291730173117321733173417351736173717381739174017411742174317441745174617471748174917501751175217531754175517561757175817591760176117621763176417651766176717681769177017711772177317741775177617771778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192 |
- // SPDX-License-Identifier: GPL-2.0
- #ifndef _LINUX_KERNEL_TRACE_H
- #define _LINUX_KERNEL_TRACE_H
- #include <linux/fs.h>
- #include <linux/atomic.h>
- #include <linux/sched.h>
- #include <linux/clocksource.h>
- #include <linux/ring_buffer.h>
- #include <linux/mmiotrace.h>
- #include <linux/tracepoint.h>
- #include <linux/ftrace.h>
- #include <linux/trace.h>
- #include <linux/hw_breakpoint.h>
- #include <linux/trace_seq.h>
- #include <linux/trace_events.h>
- #include <linux/compiler.h>
- #include <linux/glob.h>
- #include <linux/irq_work.h>
- #include <linux/workqueue.h>
- #include <linux/ctype.h>
- #include <linux/once_lite.h>
- #include "pid_list.h"
- #ifdef CONFIG_FTRACE_SYSCALLS
- #include <asm/unistd.h> /* For NR_syscalls */
- #include <asm/syscall.h> /* some archs define it here */
- #endif
- #define TRACE_MODE_WRITE 0640
- #define TRACE_MODE_READ 0440
- enum trace_type {
- __TRACE_FIRST_TYPE = 0,
- TRACE_FN,
- TRACE_CTX,
- TRACE_WAKE,
- TRACE_STACK,
- TRACE_PRINT,
- TRACE_BPRINT,
- TRACE_MMIO_RW,
- TRACE_MMIO_MAP,
- TRACE_BRANCH,
- TRACE_GRAPH_RET,
- TRACE_GRAPH_ENT,
- TRACE_USER_STACK,
- TRACE_BLK,
- TRACE_BPUTS,
- TRACE_HWLAT,
- TRACE_OSNOISE,
- TRACE_TIMERLAT,
- TRACE_RAW_DATA,
- TRACE_FUNC_REPEATS,
- __TRACE_LAST_TYPE,
- };
- #undef __field
- #define __field(type, item) type item;
- #undef __field_fn
- #define __field_fn(type, item) type item;
- #undef __field_struct
- #define __field_struct(type, item) __field(type, item)
- #undef __field_desc
- #define __field_desc(type, container, item)
- #undef __field_packed
- #define __field_packed(type, container, item)
- #undef __array
- #define __array(type, item, size) type item[size];
- /*
- * For backward compatibility, older user space expects to see the
- * kernel_stack event with a fixed size caller field. But today the fix
- * size is ignored by the kernel, and the real structure is dynamic.
- * Expose to user space: "unsigned long caller[8];" but the real structure
- * will be "unsigned long caller[] __counted_by(size)"
- */
- #undef __stack_array
- #define __stack_array(type, item, size, field) type item[] __counted_by(field);
- #undef __array_desc
- #define __array_desc(type, container, item, size)
- #undef __dynamic_array
- #define __dynamic_array(type, item) type item[];
- #undef __rel_dynamic_array
- #define __rel_dynamic_array(type, item) type item[];
- #undef F_STRUCT
- #define F_STRUCT(args...) args
- #undef FTRACE_ENTRY
- #define FTRACE_ENTRY(name, struct_name, id, tstruct, print) \
- struct struct_name { \
- struct trace_entry ent; \
- tstruct \
- }
- #undef FTRACE_ENTRY_DUP
- #define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk)
- #undef FTRACE_ENTRY_REG
- #define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print, regfn) \
- FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print))
- #undef FTRACE_ENTRY_PACKED
- #define FTRACE_ENTRY_PACKED(name, struct_name, id, tstruct, print) \
- FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print)) __packed
- #include "trace_entries.h"
- /* Use this for memory failure errors */
- #define MEM_FAIL(condition, fmt, ...) \
- DO_ONCE_LITE_IF(condition, pr_err, "ERROR: " fmt, ##__VA_ARGS__)
- #define FAULT_STRING "(fault)"
- #define HIST_STACKTRACE_DEPTH 16
- #define HIST_STACKTRACE_SIZE (HIST_STACKTRACE_DEPTH * sizeof(unsigned long))
- #define HIST_STACKTRACE_SKIP 5
- /*
- * syscalls are special, and need special handling, this is why
- * they are not included in trace_entries.h
- */
- struct syscall_trace_enter {
- struct trace_entry ent;
- int nr;
- unsigned long args[];
- };
- struct syscall_trace_exit {
- struct trace_entry ent;
- int nr;
- long ret;
- };
- struct kprobe_trace_entry_head {
- struct trace_entry ent;
- unsigned long ip;
- };
- struct eprobe_trace_entry_head {
- struct trace_entry ent;
- };
- struct kretprobe_trace_entry_head {
- struct trace_entry ent;
- unsigned long func;
- unsigned long ret_ip;
- };
- struct fentry_trace_entry_head {
- struct trace_entry ent;
- unsigned long ip;
- };
- struct fexit_trace_entry_head {
- struct trace_entry ent;
- unsigned long func;
- unsigned long ret_ip;
- };
- #define TRACE_BUF_SIZE 1024
- struct trace_array;
- /*
- * The CPU trace array - it consists of thousands of trace entries
- * plus some other descriptor data: (for example which task started
- * the trace, etc.)
- */
- struct trace_array_cpu {
- atomic_t disabled;
- void *buffer_page; /* ring buffer spare */
- unsigned long entries;
- unsigned long saved_latency;
- unsigned long critical_start;
- unsigned long critical_end;
- unsigned long critical_sequence;
- unsigned long nice;
- unsigned long policy;
- unsigned long rt_priority;
- unsigned long skipped_entries;
- u64 preempt_timestamp;
- pid_t pid;
- kuid_t uid;
- char comm[TASK_COMM_LEN];
- #ifdef CONFIG_FUNCTION_TRACER
- int ftrace_ignore_pid;
- #endif
- bool ignore_pid;
- };
- struct tracer;
- struct trace_option_dentry;
- struct array_buffer {
- struct trace_array *tr;
- struct trace_buffer *buffer;
- struct trace_array_cpu __percpu *data;
- u64 time_start;
- int cpu;
- };
- #define TRACE_FLAGS_MAX_SIZE 32
- struct trace_options {
- struct tracer *tracer;
- struct trace_option_dentry *topts;
- };
- struct trace_pid_list *trace_pid_list_alloc(void);
- void trace_pid_list_free(struct trace_pid_list *pid_list);
- bool trace_pid_list_is_set(struct trace_pid_list *pid_list, unsigned int pid);
- int trace_pid_list_set(struct trace_pid_list *pid_list, unsigned int pid);
- int trace_pid_list_clear(struct trace_pid_list *pid_list, unsigned int pid);
- int trace_pid_list_first(struct trace_pid_list *pid_list, unsigned int *pid);
- int trace_pid_list_next(struct trace_pid_list *pid_list, unsigned int pid,
- unsigned int *next);
- enum {
- TRACE_PIDS = BIT(0),
- TRACE_NO_PIDS = BIT(1),
- };
- static inline bool pid_type_enabled(int type, struct trace_pid_list *pid_list,
- struct trace_pid_list *no_pid_list)
- {
- /* Return true if the pid list in type has pids */
- return ((type & TRACE_PIDS) && pid_list) ||
- ((type & TRACE_NO_PIDS) && no_pid_list);
- }
- static inline bool still_need_pid_events(int type, struct trace_pid_list *pid_list,
- struct trace_pid_list *no_pid_list)
- {
- /*
- * Turning off what is in @type, return true if the "other"
- * pid list, still has pids in it.
- */
- return (!(type & TRACE_PIDS) && pid_list) ||
- (!(type & TRACE_NO_PIDS) && no_pid_list);
- }
- typedef bool (*cond_update_fn_t)(struct trace_array *tr, void *cond_data);
- /**
- * struct cond_snapshot - conditional snapshot data and callback
- *
- * The cond_snapshot structure encapsulates a callback function and
- * data associated with the snapshot for a given tracing instance.
- *
- * When a snapshot is taken conditionally, by invoking
- * tracing_snapshot_cond(tr, cond_data), the cond_data passed in is
- * passed in turn to the cond_snapshot.update() function. That data
- * can be compared by the update() implementation with the cond_data
- * contained within the struct cond_snapshot instance associated with
- * the trace_array. Because the tr->max_lock is held throughout the
- * update() call, the update() function can directly retrieve the
- * cond_snapshot and cond_data associated with the per-instance
- * snapshot associated with the trace_array.
- *
- * The cond_snapshot.update() implementation can save data to be
- * associated with the snapshot if it decides to, and returns 'true'
- * in that case, or it returns 'false' if the conditional snapshot
- * shouldn't be taken.
- *
- * The cond_snapshot instance is created and associated with the
- * user-defined cond_data by tracing_cond_snapshot_enable().
- * Likewise, the cond_snapshot instance is destroyed and is no longer
- * associated with the trace instance by
- * tracing_cond_snapshot_disable().
- *
- * The method below is required.
- *
- * @update: When a conditional snapshot is invoked, the update()
- * callback function is invoked with the tr->max_lock held. The
- * update() implementation signals whether or not to actually
- * take the snapshot, by returning 'true' if so, 'false' if no
- * snapshot should be taken. Because the max_lock is held for
- * the duration of update(), the implementation is safe to
- * directly retrieved and save any implementation data it needs
- * to in association with the snapshot.
- */
- struct cond_snapshot {
- void *cond_data;
- cond_update_fn_t update;
- };
- /*
- * struct trace_func_repeats - used to keep track of the consecutive
- * (on the same CPU) calls of a single function.
- */
- struct trace_func_repeats {
- unsigned long ip;
- unsigned long parent_ip;
- unsigned long count;
- u64 ts_last_call;
- };
- /*
- * The trace array - an array of per-CPU trace arrays. This is the
- * highest level data structure that individual tracers deal with.
- * They have on/off state as well:
- */
- struct trace_array {
- struct list_head list;
- char *name;
- struct array_buffer array_buffer;
- #ifdef CONFIG_TRACER_MAX_TRACE
- /*
- * The max_buffer is used to snapshot the trace when a maximum
- * latency is reached, or when the user initiates a snapshot.
- * Some tracers will use this to store a maximum trace while
- * it continues examining live traces.
- *
- * The buffers for the max_buffer are set up the same as the array_buffer
- * When a snapshot is taken, the buffer of the max_buffer is swapped
- * with the buffer of the array_buffer and the buffers are reset for
- * the array_buffer so the tracing can continue.
- */
- struct array_buffer max_buffer;
- bool allocated_snapshot;
- spinlock_t snapshot_trigger_lock;
- unsigned int snapshot;
- unsigned long max_latency;
- #ifdef CONFIG_FSNOTIFY
- struct dentry *d_max_latency;
- struct work_struct fsnotify_work;
- struct irq_work fsnotify_irqwork;
- #endif
- #endif
- /* The below is for memory mapped ring buffer */
- unsigned int mapped;
- unsigned long range_addr_start;
- unsigned long range_addr_size;
- long text_delta;
- long data_delta;
- struct trace_pid_list __rcu *filtered_pids;
- struct trace_pid_list __rcu *filtered_no_pids;
- /*
- * max_lock is used to protect the swapping of buffers
- * when taking a max snapshot. The buffers themselves are
- * protected by per_cpu spinlocks. But the action of the swap
- * needs its own lock.
- *
- * This is defined as a arch_spinlock_t in order to help
- * with performance when lockdep debugging is enabled.
- *
- * It is also used in other places outside the update_max_tr
- * so it needs to be defined outside of the
- * CONFIG_TRACER_MAX_TRACE.
- */
- arch_spinlock_t max_lock;
- int buffer_disabled;
- #ifdef CONFIG_FTRACE_SYSCALLS
- int sys_refcount_enter;
- int sys_refcount_exit;
- struct trace_event_file __rcu *enter_syscall_files[NR_syscalls];
- struct trace_event_file __rcu *exit_syscall_files[NR_syscalls];
- #endif
- int stop_count;
- int clock_id;
- int nr_topts;
- bool clear_trace;
- int buffer_percent;
- unsigned int n_err_log_entries;
- struct tracer *current_trace;
- unsigned int trace_flags;
- unsigned char trace_flags_index[TRACE_FLAGS_MAX_SIZE];
- unsigned int flags;
- raw_spinlock_t start_lock;
- const char *system_names;
- struct list_head err_log;
- struct dentry *dir;
- struct dentry *options;
- struct dentry *percpu_dir;
- struct eventfs_inode *event_dir;
- struct trace_options *topts;
- struct list_head systems;
- struct list_head events;
- struct trace_event_file *trace_marker_file;
- cpumask_var_t tracing_cpumask; /* only trace on set CPUs */
- /* one per_cpu trace_pipe can be opened by only one user */
- cpumask_var_t pipe_cpumask;
- int ref;
- int trace_ref;
- #ifdef CONFIG_FUNCTION_TRACER
- struct ftrace_ops *ops;
- struct trace_pid_list __rcu *function_pids;
- struct trace_pid_list __rcu *function_no_pids;
- #ifdef CONFIG_FUNCTION_GRAPH_TRACER
- struct fgraph_ops *gops;
- #endif
- #ifdef CONFIG_DYNAMIC_FTRACE
- /* All of these are protected by the ftrace_lock */
- struct list_head func_probes;
- struct list_head mod_trace;
- struct list_head mod_notrace;
- #endif
- /* function tracing enabled */
- int function_enabled;
- #endif
- int no_filter_buffering_ref;
- struct list_head hist_vars;
- #ifdef CONFIG_TRACER_SNAPSHOT
- struct cond_snapshot *cond_snapshot;
- #endif
- struct trace_func_repeats __percpu *last_func_repeats;
- /*
- * On boot up, the ring buffer is set to the minimum size, so that
- * we do not waste memory on systems that are not using tracing.
- */
- bool ring_buffer_expanded;
- };
- enum {
- TRACE_ARRAY_FL_GLOBAL = BIT(0),
- TRACE_ARRAY_FL_BOOT = BIT(1),
- };
- extern struct list_head ftrace_trace_arrays;
- extern struct mutex trace_types_lock;
- extern int trace_array_get(struct trace_array *tr);
- extern int tracing_check_open_get_tr(struct trace_array *tr);
- extern struct trace_array *trace_array_find(const char *instance);
- extern struct trace_array *trace_array_find_get(const char *instance);
- extern u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_event *rbe);
- extern int tracing_set_filter_buffering(struct trace_array *tr, bool set);
- extern int tracing_set_clock(struct trace_array *tr, const char *clockstr);
- extern bool trace_clock_in_ns(struct trace_array *tr);
- /*
- * The global tracer (top) should be the first trace array added,
- * but we check the flag anyway.
- */
- static inline struct trace_array *top_trace_array(void)
- {
- struct trace_array *tr;
- if (list_empty(&ftrace_trace_arrays))
- return NULL;
- tr = list_entry(ftrace_trace_arrays.prev,
- typeof(*tr), list);
- WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
- return tr;
- }
- #define FTRACE_CMP_TYPE(var, type) \
- __builtin_types_compatible_p(typeof(var), type *)
- #undef IF_ASSIGN
- #define IF_ASSIGN(var, entry, etype, id) \
- if (FTRACE_CMP_TYPE(var, etype)) { \
- var = (typeof(var))(entry); \
- WARN_ON(id != 0 && (entry)->type != id); \
- break; \
- }
- /* Will cause compile errors if type is not found. */
- extern void __ftrace_bad_type(void);
- /*
- * The trace_assign_type is a verifier that the entry type is
- * the same as the type being assigned. To add new types simply
- * add a line with the following format:
- *
- * IF_ASSIGN(var, ent, type, id);
- *
- * Where "type" is the trace type that includes the trace_entry
- * as the "ent" item. And "id" is the trace identifier that is
- * used in the trace_type enum.
- *
- * If the type can have more than one id, then use zero.
- */
- #define trace_assign_type(var, ent) \
- do { \
- IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN); \
- IF_ASSIGN(var, ent, struct ctx_switch_entry, 0); \
- IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK); \
- IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
- IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT); \
- IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT); \
- IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS); \
- IF_ASSIGN(var, ent, struct hwlat_entry, TRACE_HWLAT); \
- IF_ASSIGN(var, ent, struct osnoise_entry, TRACE_OSNOISE);\
- IF_ASSIGN(var, ent, struct timerlat_entry, TRACE_TIMERLAT);\
- IF_ASSIGN(var, ent, struct raw_data_entry, TRACE_RAW_DATA);\
- IF_ASSIGN(var, ent, struct trace_mmiotrace_rw, \
- TRACE_MMIO_RW); \
- IF_ASSIGN(var, ent, struct trace_mmiotrace_map, \
- TRACE_MMIO_MAP); \
- IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
- IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry, \
- TRACE_GRAPH_ENT); \
- IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry, \
- TRACE_GRAPH_RET); \
- IF_ASSIGN(var, ent, struct func_repeats_entry, \
- TRACE_FUNC_REPEATS); \
- __ftrace_bad_type(); \
- } while (0)
- /*
- * An option specific to a tracer. This is a boolean value.
- * The bit is the bit index that sets its value on the
- * flags value in struct tracer_flags.
- */
- struct tracer_opt {
- const char *name; /* Will appear on the trace_options file */
- u32 bit; /* Mask assigned in val field in tracer_flags */
- };
- /*
- * The set of specific options for a tracer. Your tracer
- * have to set the initial value of the flags val.
- */
- struct tracer_flags {
- u32 val;
- struct tracer_opt *opts;
- struct tracer *trace;
- };
- /* Makes more easy to define a tracer opt */
- #define TRACER_OPT(s, b) .name = #s, .bit = b
- struct trace_option_dentry {
- struct tracer_opt *opt;
- struct tracer_flags *flags;
- struct trace_array *tr;
- struct dentry *entry;
- };
- /**
- * struct tracer - a specific tracer and its callbacks to interact with tracefs
- * @name: the name chosen to select it on the available_tracers file
- * @init: called when one switches to this tracer (echo name > current_tracer)
- * @reset: called when one switches to another tracer
- * @start: called when tracing is unpaused (echo 1 > tracing_on)
- * @stop: called when tracing is paused (echo 0 > tracing_on)
- * @update_thresh: called when tracing_thresh is updated
- * @open: called when the trace file is opened
- * @pipe_open: called when the trace_pipe file is opened
- * @close: called when the trace file is released
- * @pipe_close: called when the trace_pipe file is released
- * @read: override the default read callback on trace_pipe
- * @splice_read: override the default splice_read callback on trace_pipe
- * @selftest: selftest to run on boot (see trace_selftest.c)
- * @print_headers: override the first lines that describe your columns
- * @print_line: callback that prints a trace
- * @set_flag: signals one of your private flags changed (trace_options file)
- * @flags: your private flags
- */
- struct tracer {
- const char *name;
- int (*init)(struct trace_array *tr);
- void (*reset)(struct trace_array *tr);
- void (*start)(struct trace_array *tr);
- void (*stop)(struct trace_array *tr);
- int (*update_thresh)(struct trace_array *tr);
- void (*open)(struct trace_iterator *iter);
- void (*pipe_open)(struct trace_iterator *iter);
- void (*close)(struct trace_iterator *iter);
- void (*pipe_close)(struct trace_iterator *iter);
- ssize_t (*read)(struct trace_iterator *iter,
- struct file *filp, char __user *ubuf,
- size_t cnt, loff_t *ppos);
- ssize_t (*splice_read)(struct trace_iterator *iter,
- struct file *filp,
- loff_t *ppos,
- struct pipe_inode_info *pipe,
- size_t len,
- unsigned int flags);
- #ifdef CONFIG_FTRACE_STARTUP_TEST
- int (*selftest)(struct tracer *trace,
- struct trace_array *tr);
- #endif
- void (*print_header)(struct seq_file *m);
- enum print_line_t (*print_line)(struct trace_iterator *iter);
- /* If you handled the flag setting, return 0 */
- int (*set_flag)(struct trace_array *tr,
- u32 old_flags, u32 bit, int set);
- /* Return 0 if OK with change, else return non-zero */
- int (*flag_changed)(struct trace_array *tr,
- u32 mask, int set);
- struct tracer *next;
- struct tracer_flags *flags;
- int enabled;
- bool print_max;
- bool allow_instances;
- #ifdef CONFIG_TRACER_MAX_TRACE
- bool use_max_tr;
- #endif
- /* True if tracer cannot be enabled in kernel param */
- bool noboot;
- };
- static inline struct ring_buffer_iter *
- trace_buffer_iter(struct trace_iterator *iter, int cpu)
- {
- return iter->buffer_iter ? iter->buffer_iter[cpu] : NULL;
- }
- int tracer_init(struct tracer *t, struct trace_array *tr);
- int tracing_is_enabled(void);
- void tracing_reset_online_cpus(struct array_buffer *buf);
- void tracing_reset_all_online_cpus(void);
- void tracing_reset_all_online_cpus_unlocked(void);
- int tracing_open_generic(struct inode *inode, struct file *filp);
- int tracing_open_generic_tr(struct inode *inode, struct file *filp);
- int tracing_release_generic_tr(struct inode *inode, struct file *file);
- int tracing_open_file_tr(struct inode *inode, struct file *filp);
- int tracing_release_file_tr(struct inode *inode, struct file *filp);
- int tracing_single_release_file_tr(struct inode *inode, struct file *filp);
- bool tracing_is_disabled(void);
- bool tracer_tracing_is_on(struct trace_array *tr);
- void tracer_tracing_on(struct trace_array *tr);
- void tracer_tracing_off(struct trace_array *tr);
- struct dentry *trace_create_file(const char *name,
- umode_t mode,
- struct dentry *parent,
- void *data,
- const struct file_operations *fops);
- int tracing_init_dentry(void);
- struct ring_buffer_event;
- struct ring_buffer_event *
- trace_buffer_lock_reserve(struct trace_buffer *buffer,
- int type,
- unsigned long len,
- unsigned int trace_ctx);
- int ring_buffer_meta_seq_init(struct file *file, struct trace_buffer *buffer, int cpu);
- struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
- struct trace_array_cpu *data);
- struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
- int *ent_cpu, u64 *ent_ts);
- void trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
- struct ring_buffer_event *event);
- bool trace_is_tracepoint_string(const char *str);
- const char *trace_event_format(struct trace_iterator *iter, const char *fmt);
- char *trace_iter_expand_format(struct trace_iterator *iter);
- bool ignore_event(struct trace_iterator *iter);
- int trace_empty(struct trace_iterator *iter);
- void *trace_find_next_entry_inc(struct trace_iterator *iter);
- void trace_init_global_iter(struct trace_iterator *iter);
- void tracing_iter_reset(struct trace_iterator *iter, int cpu);
- unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu);
- unsigned long trace_total_entries(struct trace_array *tr);
- void trace_function(struct trace_array *tr,
- unsigned long ip,
- unsigned long parent_ip,
- unsigned int trace_ctx);
- void trace_graph_function(struct trace_array *tr,
- unsigned long ip,
- unsigned long parent_ip,
- unsigned int trace_ctx);
- void trace_latency_header(struct seq_file *m);
- void trace_default_header(struct seq_file *m);
- void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
- void trace_graph_return(struct ftrace_graph_ret *trace, struct fgraph_ops *gops);
- int trace_graph_entry(struct ftrace_graph_ent *trace, struct fgraph_ops *gops);
- void tracing_start_cmdline_record(void);
- void tracing_stop_cmdline_record(void);
- void tracing_start_tgid_record(void);
- void tracing_stop_tgid_record(void);
- int register_tracer(struct tracer *type);
- int is_tracing_stopped(void);
- loff_t tracing_lseek(struct file *file, loff_t offset, int whence);
- extern cpumask_var_t __read_mostly tracing_buffer_mask;
- #define for_each_tracing_cpu(cpu) \
- for_each_cpu(cpu, tracing_buffer_mask)
- extern unsigned long nsecs_to_usecs(unsigned long nsecs);
- extern unsigned long tracing_thresh;
- /* PID filtering */
- extern int pid_max;
- bool trace_find_filtered_pid(struct trace_pid_list *filtered_pids,
- pid_t search_pid);
- bool trace_ignore_this_task(struct trace_pid_list *filtered_pids,
- struct trace_pid_list *filtered_no_pids,
- struct task_struct *task);
- void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
- struct task_struct *self,
- struct task_struct *task);
- void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos);
- void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos);
- int trace_pid_show(struct seq_file *m, void *v);
- int trace_pid_write(struct trace_pid_list *filtered_pids,
- struct trace_pid_list **new_pid_list,
- const char __user *ubuf, size_t cnt);
- #ifdef CONFIG_TRACER_MAX_TRACE
- void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
- void *cond_data);
- void update_max_tr_single(struct trace_array *tr,
- struct task_struct *tsk, int cpu);
- #ifdef CONFIG_FSNOTIFY
- #define LATENCY_FS_NOTIFY
- #endif
- #endif /* CONFIG_TRACER_MAX_TRACE */
- #ifdef LATENCY_FS_NOTIFY
- void latency_fsnotify(struct trace_array *tr);
- #else
- static inline void latency_fsnotify(struct trace_array *tr) { }
- #endif
- #ifdef CONFIG_STACKTRACE
- void __trace_stack(struct trace_array *tr, unsigned int trace_ctx, int skip);
- #else
- static inline void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
- int skip)
- {
- }
- #endif /* CONFIG_STACKTRACE */
- void trace_last_func_repeats(struct trace_array *tr,
- struct trace_func_repeats *last_info,
- unsigned int trace_ctx);
- extern u64 ftrace_now(int cpu);
- extern void trace_find_cmdline(int pid, char comm[]);
- extern int trace_find_tgid(int pid);
- extern void trace_event_follow_fork(struct trace_array *tr, bool enable);
- #ifdef CONFIG_DYNAMIC_FTRACE
- extern unsigned long ftrace_update_tot_cnt;
- extern unsigned long ftrace_number_of_pages;
- extern unsigned long ftrace_number_of_groups;
- void ftrace_init_trace_array(struct trace_array *tr);
- #else
- static inline void ftrace_init_trace_array(struct trace_array *tr) { }
- #endif
- #define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
- extern int DYN_FTRACE_TEST_NAME(void);
- #define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
- extern int DYN_FTRACE_TEST_NAME2(void);
- extern void trace_set_ring_buffer_expanded(struct trace_array *tr);
- extern bool tracing_selftest_disabled;
- #ifdef CONFIG_FTRACE_STARTUP_TEST
- extern void __init disable_tracing_selftest(const char *reason);
- extern int trace_selftest_startup_function(struct tracer *trace,
- struct trace_array *tr);
- extern int trace_selftest_startup_function_graph(struct tracer *trace,
- struct trace_array *tr);
- extern int trace_selftest_startup_irqsoff(struct tracer *trace,
- struct trace_array *tr);
- extern int trace_selftest_startup_preemptoff(struct tracer *trace,
- struct trace_array *tr);
- extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
- struct trace_array *tr);
- extern int trace_selftest_startup_wakeup(struct tracer *trace,
- struct trace_array *tr);
- extern int trace_selftest_startup_nop(struct tracer *trace,
- struct trace_array *tr);
- extern int trace_selftest_startup_branch(struct tracer *trace,
- struct trace_array *tr);
- /*
- * Tracer data references selftest functions that only occur
- * on boot up. These can be __init functions. Thus, when selftests
- * are enabled, then the tracers need to reference __init functions.
- */
- #define __tracer_data __refdata
- #else
- static inline void __init disable_tracing_selftest(const char *reason)
- {
- }
- /* Tracers are seldom changed. Optimize when selftests are disabled. */
- #define __tracer_data __read_mostly
- #endif /* CONFIG_FTRACE_STARTUP_TEST */
- extern void *head_page(struct trace_array_cpu *data);
- extern unsigned long long ns2usecs(u64 nsec);
- __printf(2, 0)
- int trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
- __printf(2, 0)
- int trace_vprintk(unsigned long ip, const char *fmt, va_list args);
- __printf(3, 0)
- int trace_array_vprintk(struct trace_array *tr,
- unsigned long ip, const char *fmt, va_list args);
- __printf(3, 4)
- int trace_array_printk_buf(struct trace_buffer *buffer,
- unsigned long ip, const char *fmt, ...);
- void trace_printk_seq(struct trace_seq *s);
- enum print_line_t print_trace_line(struct trace_iterator *iter);
- extern char trace_find_mark(unsigned long long duration);
- struct ftrace_hash;
- struct ftrace_mod_load {
- struct list_head list;
- char *func;
- char *module;
- int enable;
- };
- enum {
- FTRACE_HASH_FL_MOD = (1 << 0),
- };
- struct ftrace_hash {
- unsigned long size_bits;
- struct hlist_head *buckets;
- unsigned long count;
- unsigned long flags;
- struct rcu_head rcu;
- };
- struct ftrace_func_entry *
- ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip);
- static __always_inline bool ftrace_hash_empty(struct ftrace_hash *hash)
- {
- return !hash || !(hash->count || (hash->flags & FTRACE_HASH_FL_MOD));
- }
- /* Standard output formatting function used for function return traces */
- #ifdef CONFIG_FUNCTION_GRAPH_TRACER
- /* Flag options */
- #define TRACE_GRAPH_PRINT_OVERRUN 0x1
- #define TRACE_GRAPH_PRINT_CPU 0x2
- #define TRACE_GRAPH_PRINT_OVERHEAD 0x4
- #define TRACE_GRAPH_PRINT_PROC 0x8
- #define TRACE_GRAPH_PRINT_DURATION 0x10
- #define TRACE_GRAPH_PRINT_ABS_TIME 0x20
- #define TRACE_GRAPH_PRINT_REL_TIME 0x40
- #define TRACE_GRAPH_PRINT_IRQS 0x80
- #define TRACE_GRAPH_PRINT_TAIL 0x100
- #define TRACE_GRAPH_SLEEP_TIME 0x200
- #define TRACE_GRAPH_GRAPH_TIME 0x400
- #define TRACE_GRAPH_PRINT_RETVAL 0x800
- #define TRACE_GRAPH_PRINT_RETVAL_HEX 0x1000
- #define TRACE_GRAPH_PRINT_FILL_SHIFT 28
- #define TRACE_GRAPH_PRINT_FILL_MASK (0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT)
- extern void ftrace_graph_sleep_time_control(bool enable);
- #ifdef CONFIG_FUNCTION_PROFILER
- extern void ftrace_graph_graph_time_control(bool enable);
- #else
- static inline void ftrace_graph_graph_time_control(bool enable) { }
- #endif
- extern enum print_line_t
- print_graph_function_flags(struct trace_iterator *iter, u32 flags);
- extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
- extern void
- trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
- extern void graph_trace_open(struct trace_iterator *iter);
- extern void graph_trace_close(struct trace_iterator *iter);
- extern int __trace_graph_entry(struct trace_array *tr,
- struct ftrace_graph_ent *trace,
- unsigned int trace_ctx);
- extern void __trace_graph_return(struct trace_array *tr,
- struct ftrace_graph_ret *trace,
- unsigned int trace_ctx);
- extern void init_array_fgraph_ops(struct trace_array *tr, struct ftrace_ops *ops);
- extern int allocate_fgraph_ops(struct trace_array *tr, struct ftrace_ops *ops);
- extern void free_fgraph_ops(struct trace_array *tr);
- enum {
- TRACE_GRAPH_FL = 1,
- /*
- * In the very unlikely case that an interrupt came in
- * at a start of graph tracing, and we want to trace
- * the function in that interrupt, the depth can be greater
- * than zero, because of the preempted start of a previous
- * trace. In an even more unlikely case, depth could be 2
- * if a softirq interrupted the start of graph tracing,
- * followed by an interrupt preempting a start of graph
- * tracing in the softirq, and depth can even be 3
- * if an NMI came in at the start of an interrupt function
- * that preempted a softirq start of a function that
- * preempted normal context!!!! Luckily, it can't be
- * greater than 3, so the next two bits are a mask
- * of what the depth is when we set TRACE_GRAPH_FL
- */
- TRACE_GRAPH_DEPTH_START_BIT,
- TRACE_GRAPH_DEPTH_END_BIT,
- /*
- * To implement set_graph_notrace, if this bit is set, we ignore
- * function graph tracing of called functions, until the return
- * function is called to clear it.
- */
- TRACE_GRAPH_NOTRACE_BIT,
- };
- #define TRACE_GRAPH_NOTRACE (1 << TRACE_GRAPH_NOTRACE_BIT)
- static inline unsigned long ftrace_graph_depth(unsigned long *task_var)
- {
- return (*task_var >> TRACE_GRAPH_DEPTH_START_BIT) & 3;
- }
- static inline void ftrace_graph_set_depth(unsigned long *task_var, int depth)
- {
- *task_var &= ~(3 << TRACE_GRAPH_DEPTH_START_BIT);
- *task_var |= (depth & 3) << TRACE_GRAPH_DEPTH_START_BIT;
- }
- #ifdef CONFIG_DYNAMIC_FTRACE
- extern struct ftrace_hash __rcu *ftrace_graph_hash;
- extern struct ftrace_hash __rcu *ftrace_graph_notrace_hash;
- static inline int
- ftrace_graph_addr(unsigned long *task_var, struct ftrace_graph_ent *trace)
- {
- unsigned long addr = trace->func;
- int ret = 0;
- struct ftrace_hash *hash;
- preempt_disable_notrace();
- /*
- * Have to open code "rcu_dereference_sched()" because the
- * function graph tracer can be called when RCU is not
- * "watching".
- * Protected with schedule_on_each_cpu(ftrace_sync)
- */
- hash = rcu_dereference_protected(ftrace_graph_hash, !preemptible());
- if (ftrace_hash_empty(hash)) {
- ret = 1;
- goto out;
- }
- if (ftrace_lookup_ip(hash, addr)) {
- /*
- * This needs to be cleared on the return functions
- * when the depth is zero.
- */
- *task_var |= TRACE_GRAPH_FL;
- ftrace_graph_set_depth(task_var, trace->depth);
- /*
- * If no irqs are to be traced, but a set_graph_function
- * is set, and called by an interrupt handler, we still
- * want to trace it.
- */
- if (in_hardirq())
- trace_recursion_set(TRACE_IRQ_BIT);
- else
- trace_recursion_clear(TRACE_IRQ_BIT);
- ret = 1;
- }
- out:
- preempt_enable_notrace();
- return ret;
- }
- static inline void
- ftrace_graph_addr_finish(struct fgraph_ops *gops, struct ftrace_graph_ret *trace)
- {
- unsigned long *task_var = fgraph_get_task_var(gops);
- if ((*task_var & TRACE_GRAPH_FL) &&
- trace->depth == ftrace_graph_depth(task_var))
- *task_var &= ~TRACE_GRAPH_FL;
- }
- static inline int ftrace_graph_notrace_addr(unsigned long addr)
- {
- int ret = 0;
- struct ftrace_hash *notrace_hash;
- preempt_disable_notrace();
- /*
- * Have to open code "rcu_dereference_sched()" because the
- * function graph tracer can be called when RCU is not
- * "watching".
- * Protected with schedule_on_each_cpu(ftrace_sync)
- */
- notrace_hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
- !preemptible());
- if (ftrace_lookup_ip(notrace_hash, addr))
- ret = 1;
- preempt_enable_notrace();
- return ret;
- }
- #else
- static inline int ftrace_graph_addr(unsigned long *task_var, struct ftrace_graph_ent *trace)
- {
- return 1;
- }
- static inline int ftrace_graph_notrace_addr(unsigned long addr)
- {
- return 0;
- }
- static inline void ftrace_graph_addr_finish(struct fgraph_ops *gops, struct ftrace_graph_ret *trace)
- { }
- #endif /* CONFIG_DYNAMIC_FTRACE */
- extern unsigned int fgraph_max_depth;
- static inline bool
- ftrace_graph_ignore_func(struct fgraph_ops *gops, struct ftrace_graph_ent *trace)
- {
- unsigned long *task_var = fgraph_get_task_var(gops);
- /* trace it when it is-nested-in or is a function enabled. */
- return !((*task_var & TRACE_GRAPH_FL) ||
- ftrace_graph_addr(task_var, trace)) ||
- (trace->depth < 0) ||
- (fgraph_max_depth && trace->depth >= fgraph_max_depth);
- }
- void fgraph_init_ops(struct ftrace_ops *dst_ops,
- struct ftrace_ops *src_ops);
- #else /* CONFIG_FUNCTION_GRAPH_TRACER */
- static inline enum print_line_t
- print_graph_function_flags(struct trace_iterator *iter, u32 flags)
- {
- return TRACE_TYPE_UNHANDLED;
- }
- static inline void free_fgraph_ops(struct trace_array *tr) { }
- /* ftrace_ops may not be defined */
- #define init_array_fgraph_ops(tr, ops) do { } while (0)
- #define allocate_fgraph_ops(tr, ops) ({ 0; })
- #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
- extern struct list_head ftrace_pids;
- #ifdef CONFIG_FUNCTION_TRACER
- #define FTRACE_PID_IGNORE -1
- #define FTRACE_PID_TRACE -2
- struct ftrace_func_command {
- struct list_head list;
- char *name;
- int (*func)(struct trace_array *tr,
- struct ftrace_hash *hash,
- char *func, char *cmd,
- char *params, int enable);
- };
- extern bool ftrace_filter_param __initdata;
- static inline int ftrace_trace_task(struct trace_array *tr)
- {
- return this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid) !=
- FTRACE_PID_IGNORE;
- }
- extern int ftrace_is_dead(void);
- int ftrace_create_function_files(struct trace_array *tr,
- struct dentry *parent);
- void ftrace_destroy_function_files(struct trace_array *tr);
- int ftrace_allocate_ftrace_ops(struct trace_array *tr);
- void ftrace_free_ftrace_ops(struct trace_array *tr);
- void ftrace_init_global_array_ops(struct trace_array *tr);
- void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func);
- void ftrace_reset_array_ops(struct trace_array *tr);
- void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer);
- void ftrace_init_tracefs_toplevel(struct trace_array *tr,
- struct dentry *d_tracer);
- void ftrace_clear_pids(struct trace_array *tr);
- int init_function_trace(void);
- void ftrace_pid_follow_fork(struct trace_array *tr, bool enable);
- #else
- static inline int ftrace_trace_task(struct trace_array *tr)
- {
- return 1;
- }
- static inline int ftrace_is_dead(void) { return 0; }
- static inline int
- ftrace_create_function_files(struct trace_array *tr,
- struct dentry *parent)
- {
- return 0;
- }
- static inline int ftrace_allocate_ftrace_ops(struct trace_array *tr)
- {
- return 0;
- }
- static inline void ftrace_free_ftrace_ops(struct trace_array *tr) { }
- static inline void ftrace_destroy_function_files(struct trace_array *tr) { }
- static inline __init void
- ftrace_init_global_array_ops(struct trace_array *tr) { }
- static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
- static inline void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d) { }
- static inline void ftrace_init_tracefs_toplevel(struct trace_array *tr, struct dentry *d) { }
- static inline void ftrace_clear_pids(struct trace_array *tr) { }
- static inline int init_function_trace(void) { return 0; }
- static inline void ftrace_pid_follow_fork(struct trace_array *tr, bool enable) { }
- /* ftace_func_t type is not defined, use macro instead of static inline */
- #define ftrace_init_array_ops(tr, func) do { } while (0)
- #endif /* CONFIG_FUNCTION_TRACER */
- #if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
- struct ftrace_probe_ops {
- void (*func)(unsigned long ip,
- unsigned long parent_ip,
- struct trace_array *tr,
- struct ftrace_probe_ops *ops,
- void *data);
- int (*init)(struct ftrace_probe_ops *ops,
- struct trace_array *tr,
- unsigned long ip, void *init_data,
- void **data);
- void (*free)(struct ftrace_probe_ops *ops,
- struct trace_array *tr,
- unsigned long ip, void *data);
- int (*print)(struct seq_file *m,
- unsigned long ip,
- struct ftrace_probe_ops *ops,
- void *data);
- };
- struct ftrace_func_mapper;
- typedef int (*ftrace_mapper_func)(void *data);
- struct ftrace_func_mapper *allocate_ftrace_func_mapper(void);
- void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper,
- unsigned long ip);
- int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper,
- unsigned long ip, void *data);
- void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper,
- unsigned long ip);
- void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper,
- ftrace_mapper_func free_func);
- extern int
- register_ftrace_function_probe(char *glob, struct trace_array *tr,
- struct ftrace_probe_ops *ops, void *data);
- extern int
- unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
- struct ftrace_probe_ops *ops);
- extern void clear_ftrace_function_probes(struct trace_array *tr);
- int register_ftrace_command(struct ftrace_func_command *cmd);
- int unregister_ftrace_command(struct ftrace_func_command *cmd);
- void ftrace_create_filter_files(struct ftrace_ops *ops,
- struct dentry *parent);
- void ftrace_destroy_filter_files(struct ftrace_ops *ops);
- extern int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
- int len, int reset);
- extern int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
- int len, int reset);
- #else
- struct ftrace_func_command;
- static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
- {
- return -EINVAL;
- }
- static inline __init int unregister_ftrace_command(char *cmd_name)
- {
- return -EINVAL;
- }
- static inline void clear_ftrace_function_probes(struct trace_array *tr)
- {
- }
- /*
- * The ops parameter passed in is usually undefined.
- * This must be a macro.
- */
- #define ftrace_create_filter_files(ops, parent) do { } while (0)
- #define ftrace_destroy_filter_files(ops) do { } while (0)
- #endif /* CONFIG_FUNCTION_TRACER && CONFIG_DYNAMIC_FTRACE */
- bool ftrace_event_is_function(struct trace_event_call *call);
- /*
- * struct trace_parser - servers for reading the user input separated by spaces
- * @cont: set if the input is not complete - no final space char was found
- * @buffer: holds the parsed user input
- * @idx: user input length
- * @size: buffer size
- */
- struct trace_parser {
- bool cont;
- bool fail;
- char *buffer;
- unsigned idx;
- unsigned size;
- };
- static inline bool trace_parser_loaded(struct trace_parser *parser)
- {
- return !parser->fail && parser->idx != 0;
- }
- static inline bool trace_parser_cont(struct trace_parser *parser)
- {
- return parser->cont;
- }
- static inline void trace_parser_clear(struct trace_parser *parser)
- {
- parser->cont = false;
- parser->idx = 0;
- }
- static inline void trace_parser_fail(struct trace_parser *parser)
- {
- parser->fail = true;
- }
- extern int trace_parser_get_init(struct trace_parser *parser, int size);
- extern void trace_parser_put(struct trace_parser *parser);
- extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
- size_t cnt, loff_t *ppos);
- /*
- * Only create function graph options if function graph is configured.
- */
- #ifdef CONFIG_FUNCTION_GRAPH_TRACER
- # define FGRAPH_FLAGS \
- C(DISPLAY_GRAPH, "display-graph"),
- #else
- # define FGRAPH_FLAGS
- #endif
- #ifdef CONFIG_BRANCH_TRACER
- # define BRANCH_FLAGS \
- C(BRANCH, "branch"),
- #else
- # define BRANCH_FLAGS
- #endif
- #ifdef CONFIG_FUNCTION_TRACER
- # define FUNCTION_FLAGS \
- C(FUNCTION, "function-trace"), \
- C(FUNC_FORK, "function-fork"),
- # define FUNCTION_DEFAULT_FLAGS TRACE_ITER_FUNCTION
- #else
- # define FUNCTION_FLAGS
- # define FUNCTION_DEFAULT_FLAGS 0UL
- # define TRACE_ITER_FUNC_FORK 0UL
- #endif
- #ifdef CONFIG_STACKTRACE
- # define STACK_FLAGS \
- C(STACKTRACE, "stacktrace"),
- #else
- # define STACK_FLAGS
- #endif
- /*
- * trace_iterator_flags is an enumeration that defines bit
- * positions into trace_flags that controls the output.
- *
- * NOTE: These bits must match the trace_options array in
- * trace.c (this macro guarantees it).
- */
- #define TRACE_FLAGS \
- C(PRINT_PARENT, "print-parent"), \
- C(SYM_OFFSET, "sym-offset"), \
- C(SYM_ADDR, "sym-addr"), \
- C(VERBOSE, "verbose"), \
- C(RAW, "raw"), \
- C(HEX, "hex"), \
- C(BIN, "bin"), \
- C(BLOCK, "block"), \
- C(FIELDS, "fields"), \
- C(PRINTK, "trace_printk"), \
- C(ANNOTATE, "annotate"), \
- C(USERSTACKTRACE, "userstacktrace"), \
- C(SYM_USEROBJ, "sym-userobj"), \
- C(PRINTK_MSGONLY, "printk-msg-only"), \
- C(CONTEXT_INFO, "context-info"), /* Print pid/cpu/time */ \
- C(LATENCY_FMT, "latency-format"), \
- C(RECORD_CMD, "record-cmd"), \
- C(RECORD_TGID, "record-tgid"), \
- C(OVERWRITE, "overwrite"), \
- C(STOP_ON_FREE, "disable_on_free"), \
- C(IRQ_INFO, "irq-info"), \
- C(MARKERS, "markers"), \
- C(EVENT_FORK, "event-fork"), \
- C(TRACE_PRINTK, "trace_printk_dest"), \
- C(PAUSE_ON_TRACE, "pause-on-trace"), \
- C(HASH_PTR, "hash-ptr"), /* Print hashed pointer */ \
- FUNCTION_FLAGS \
- FGRAPH_FLAGS \
- STACK_FLAGS \
- BRANCH_FLAGS
- /*
- * By defining C, we can make TRACE_FLAGS a list of bit names
- * that will define the bits for the flag masks.
- */
- #undef C
- #define C(a, b) TRACE_ITER_##a##_BIT
- enum trace_iterator_bits {
- TRACE_FLAGS
- /* Make sure we don't go more than we have bits for */
- TRACE_ITER_LAST_BIT
- };
- /*
- * By redefining C, we can make TRACE_FLAGS a list of masks that
- * use the bits as defined above.
- */
- #undef C
- #define C(a, b) TRACE_ITER_##a = (1 << TRACE_ITER_##a##_BIT)
- enum trace_iterator_flags { TRACE_FLAGS };
- /*
- * TRACE_ITER_SYM_MASK masks the options in trace_flags that
- * control the output of kernel symbols.
- */
- #define TRACE_ITER_SYM_MASK \
- (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
- extern struct tracer nop_trace;
- #ifdef CONFIG_BRANCH_TRACER
- extern int enable_branch_tracing(struct trace_array *tr);
- extern void disable_branch_tracing(void);
- static inline int trace_branch_enable(struct trace_array *tr)
- {
- if (tr->trace_flags & TRACE_ITER_BRANCH)
- return enable_branch_tracing(tr);
- return 0;
- }
- static inline void trace_branch_disable(void)
- {
- /* due to races, always disable */
- disable_branch_tracing();
- }
- #else
- static inline int trace_branch_enable(struct trace_array *tr)
- {
- return 0;
- }
- static inline void trace_branch_disable(void)
- {
- }
- #endif /* CONFIG_BRANCH_TRACER */
- /* set ring buffers to default size if not already done so */
- int tracing_update_buffers(struct trace_array *tr);
- union trace_synth_field {
- u8 as_u8;
- u16 as_u16;
- u32 as_u32;
- u64 as_u64;
- struct trace_dynamic_info as_dynamic;
- };
- struct ftrace_event_field {
- struct list_head link;
- const char *name;
- const char *type;
- int filter_type;
- int offset;
- int size;
- unsigned int is_signed:1;
- unsigned int needs_test:1;
- int len;
- };
- struct prog_entry;
- struct event_filter {
- struct prog_entry __rcu *prog;
- char *filter_string;
- };
- struct event_subsystem {
- struct list_head list;
- const char *name;
- struct event_filter *filter;
- int ref_count;
- };
- struct trace_subsystem_dir {
- struct list_head list;
- struct event_subsystem *subsystem;
- struct trace_array *tr;
- struct eventfs_inode *ei;
- int ref_count;
- int nr_events;
- };
- extern int call_filter_check_discard(struct trace_event_call *call, void *rec,
- struct trace_buffer *buffer,
- struct ring_buffer_event *event);
- void trace_buffer_unlock_commit_regs(struct trace_array *tr,
- struct trace_buffer *buffer,
- struct ring_buffer_event *event,
- unsigned int trcace_ctx,
- struct pt_regs *regs);
- static inline void trace_buffer_unlock_commit(struct trace_array *tr,
- struct trace_buffer *buffer,
- struct ring_buffer_event *event,
- unsigned int trace_ctx)
- {
- trace_buffer_unlock_commit_regs(tr, buffer, event, trace_ctx, NULL);
- }
- DECLARE_PER_CPU(bool, trace_taskinfo_save);
- int trace_save_cmdline(struct task_struct *tsk);
- int trace_create_savedcmd(void);
- int trace_alloc_tgid_map(void);
- void trace_free_saved_cmdlines_buffer(void);
- extern const struct file_operations tracing_saved_cmdlines_fops;
- extern const struct file_operations tracing_saved_tgids_fops;
- extern const struct file_operations tracing_saved_cmdlines_size_fops;
- DECLARE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
- DECLARE_PER_CPU(int, trace_buffered_event_cnt);
- void trace_buffered_event_disable(void);
- void trace_buffered_event_enable(void);
- void early_enable_events(struct trace_array *tr, char *buf, bool disable_first);
- static inline void
- __trace_event_discard_commit(struct trace_buffer *buffer,
- struct ring_buffer_event *event)
- {
- if (this_cpu_read(trace_buffered_event) == event) {
- /* Simply release the temp buffer and enable preemption */
- this_cpu_dec(trace_buffered_event_cnt);
- preempt_enable_notrace();
- return;
- }
- /* ring_buffer_discard_commit() enables preemption */
- ring_buffer_discard_commit(buffer, event);
- }
- /*
- * Helper function for event_trigger_unlock_commit{_regs}().
- * If there are event triggers attached to this event that requires
- * filtering against its fields, then they will be called as the
- * entry already holds the field information of the current event.
- *
- * It also checks if the event should be discarded or not.
- * It is to be discarded if the event is soft disabled and the
- * event was only recorded to process triggers, or if the event
- * filter is active and this event did not match the filters.
- *
- * Returns true if the event is discarded, false otherwise.
- */
- static inline bool
- __event_trigger_test_discard(struct trace_event_file *file,
- struct trace_buffer *buffer,
- struct ring_buffer_event *event,
- void *entry,
- enum event_trigger_type *tt)
- {
- unsigned long eflags = file->flags;
- if (eflags & EVENT_FILE_FL_TRIGGER_COND)
- *tt = event_triggers_call(file, buffer, entry, event);
- if (likely(!(file->flags & (EVENT_FILE_FL_SOFT_DISABLED |
- EVENT_FILE_FL_FILTERED |
- EVENT_FILE_FL_PID_FILTER))))
- return false;
- if (file->flags & EVENT_FILE_FL_SOFT_DISABLED)
- goto discard;
- if (file->flags & EVENT_FILE_FL_FILTERED &&
- !filter_match_preds(file->filter, entry))
- goto discard;
- if ((file->flags & EVENT_FILE_FL_PID_FILTER) &&
- trace_event_ignore_this_pid(file))
- goto discard;
- return false;
- discard:
- __trace_event_discard_commit(buffer, event);
- return true;
- }
- /**
- * event_trigger_unlock_commit - handle triggers and finish event commit
- * @file: The file pointer associated with the event
- * @buffer: The ring buffer that the event is being written to
- * @event: The event meta data in the ring buffer
- * @entry: The event itself
- * @trace_ctx: The tracing context flags.
- *
- * This is a helper function to handle triggers that require data
- * from the event itself. It also tests the event against filters and
- * if the event is soft disabled and should be discarded.
- */
- static inline void
- event_trigger_unlock_commit(struct trace_event_file *file,
- struct trace_buffer *buffer,
- struct ring_buffer_event *event,
- void *entry, unsigned int trace_ctx)
- {
- enum event_trigger_type tt = ETT_NONE;
- if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
- trace_buffer_unlock_commit(file->tr, buffer, event, trace_ctx);
- if (tt)
- event_triggers_post_call(file, tt);
- }
- #define FILTER_PRED_INVALID ((unsigned short)-1)
- #define FILTER_PRED_IS_RIGHT (1 << 15)
- #define FILTER_PRED_FOLD (1 << 15)
- /*
- * The max preds is the size of unsigned short with
- * two flags at the MSBs. One bit is used for both the IS_RIGHT
- * and FOLD flags. The other is reserved.
- *
- * 2^14 preds is way more than enough.
- */
- #define MAX_FILTER_PRED 16384
- struct filter_pred;
- struct regex;
- typedef int (*regex_match_func)(char *str, struct regex *r, int len);
- enum regex_type {
- MATCH_FULL = 0,
- MATCH_FRONT_ONLY,
- MATCH_MIDDLE_ONLY,
- MATCH_END_ONLY,
- MATCH_GLOB,
- MATCH_INDEX,
- };
- struct regex {
- char pattern[MAX_FILTER_STR_VAL];
- int len;
- int field_len;
- regex_match_func match;
- };
- static inline bool is_string_field(struct ftrace_event_field *field)
- {
- return field->filter_type == FILTER_DYN_STRING ||
- field->filter_type == FILTER_RDYN_STRING ||
- field->filter_type == FILTER_STATIC_STRING ||
- field->filter_type == FILTER_PTR_STRING ||
- field->filter_type == FILTER_COMM;
- }
- static inline bool is_function_field(struct ftrace_event_field *field)
- {
- return field->filter_type == FILTER_TRACE_FN;
- }
- extern enum regex_type
- filter_parse_regex(char *buff, int len, char **search, int *not);
- extern void print_event_filter(struct trace_event_file *file,
- struct trace_seq *s);
- extern int apply_event_filter(struct trace_event_file *file,
- char *filter_string);
- extern int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
- char *filter_string);
- extern void print_subsystem_event_filter(struct event_subsystem *system,
- struct trace_seq *s);
- extern int filter_assign_type(const char *type);
- extern int create_event_filter(struct trace_array *tr,
- struct trace_event_call *call,
- char *filter_str, bool set_str,
- struct event_filter **filterp);
- extern void free_event_filter(struct event_filter *filter);
- struct ftrace_event_field *
- trace_find_event_field(struct trace_event_call *call, char *name);
- extern void trace_event_enable_cmd_record(bool enable);
- extern void trace_event_enable_tgid_record(bool enable);
- extern int event_trace_init(void);
- extern int init_events(void);
- extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
- extern int event_trace_del_tracer(struct trace_array *tr);
- extern void __trace_early_add_events(struct trace_array *tr);
- extern struct trace_event_file *__find_event_file(struct trace_array *tr,
- const char *system,
- const char *event);
- extern struct trace_event_file *find_event_file(struct trace_array *tr,
- const char *system,
- const char *event);
- static inline void *event_file_data(struct file *filp)
- {
- return READ_ONCE(file_inode(filp)->i_private);
- }
- extern struct mutex event_mutex;
- extern struct list_head ftrace_events;
- /*
- * When the trace_event_file is the filp->i_private pointer,
- * it must be taken under the event_mutex lock, and then checked
- * if the EVENT_FILE_FL_FREED flag is set. If it is, then the
- * data pointed to by the trace_event_file can not be trusted.
- *
- * Use the event_file_file() to access the trace_event_file from
- * the filp the first time under the event_mutex and check for
- * NULL. If it is needed to be retrieved again and the event_mutex
- * is still held, then the event_file_data() can be used and it
- * is guaranteed to be valid.
- */
- static inline struct trace_event_file *event_file_file(struct file *filp)
- {
- struct trace_event_file *file;
- lockdep_assert_held(&event_mutex);
- file = READ_ONCE(file_inode(filp)->i_private);
- if (!file || file->flags & EVENT_FILE_FL_FREED)
- return NULL;
- return file;
- }
- extern const struct file_operations event_trigger_fops;
- extern const struct file_operations event_hist_fops;
- extern const struct file_operations event_hist_debug_fops;
- extern const struct file_operations event_inject_fops;
- #ifdef CONFIG_HIST_TRIGGERS
- extern int register_trigger_hist_cmd(void);
- extern int register_trigger_hist_enable_disable_cmds(void);
- #else
- static inline int register_trigger_hist_cmd(void) { return 0; }
- static inline int register_trigger_hist_enable_disable_cmds(void) { return 0; }
- #endif
- extern int register_trigger_cmds(void);
- extern void clear_event_triggers(struct trace_array *tr);
- enum {
- EVENT_TRIGGER_FL_PROBE = BIT(0),
- };
- struct event_trigger_data {
- unsigned long count;
- int ref;
- int flags;
- struct event_trigger_ops *ops;
- struct event_command *cmd_ops;
- struct event_filter __rcu *filter;
- char *filter_str;
- void *private_data;
- bool paused;
- bool paused_tmp;
- struct list_head list;
- char *name;
- struct list_head named_list;
- struct event_trigger_data *named_data;
- };
- /* Avoid typos */
- #define ENABLE_EVENT_STR "enable_event"
- #define DISABLE_EVENT_STR "disable_event"
- #define ENABLE_HIST_STR "enable_hist"
- #define DISABLE_HIST_STR "disable_hist"
- struct enable_trigger_data {
- struct trace_event_file *file;
- bool enable;
- bool hist;
- };
- extern int event_enable_trigger_print(struct seq_file *m,
- struct event_trigger_data *data);
- extern void event_enable_trigger_free(struct event_trigger_data *data);
- extern int event_enable_trigger_parse(struct event_command *cmd_ops,
- struct trace_event_file *file,
- char *glob, char *cmd,
- char *param_and_filter);
- extern int event_enable_register_trigger(char *glob,
- struct event_trigger_data *data,
- struct trace_event_file *file);
- extern void event_enable_unregister_trigger(char *glob,
- struct event_trigger_data *test,
- struct trace_event_file *file);
- extern struct event_trigger_data *
- trigger_data_alloc(struct event_command *cmd_ops, char *cmd, char *param,
- void *private_data);
- extern void trigger_data_free(struct event_trigger_data *data);
- extern int event_trigger_init(struct event_trigger_data *data);
- extern int trace_event_trigger_enable_disable(struct trace_event_file *file,
- int trigger_enable);
- extern void update_cond_flag(struct trace_event_file *file);
- extern int set_trigger_filter(char *filter_str,
- struct event_trigger_data *trigger_data,
- struct trace_event_file *file);
- extern struct event_trigger_data *find_named_trigger(const char *name);
- extern bool is_named_trigger(struct event_trigger_data *test);
- extern int save_named_trigger(const char *name,
- struct event_trigger_data *data);
- extern void del_named_trigger(struct event_trigger_data *data);
- extern void pause_named_trigger(struct event_trigger_data *data);
- extern void unpause_named_trigger(struct event_trigger_data *data);
- extern void set_named_trigger_data(struct event_trigger_data *data,
- struct event_trigger_data *named_data);
- extern struct event_trigger_data *
- get_named_trigger_data(struct event_trigger_data *data);
- extern int register_event_command(struct event_command *cmd);
- extern int unregister_event_command(struct event_command *cmd);
- extern int register_trigger_hist_enable_disable_cmds(void);
- extern bool event_trigger_check_remove(const char *glob);
- extern bool event_trigger_empty_param(const char *param);
- extern int event_trigger_separate_filter(char *param_and_filter, char **param,
- char **filter, bool param_required);
- extern int event_trigger_parse_num(char *trigger,
- struct event_trigger_data *trigger_data);
- extern int event_trigger_set_filter(struct event_command *cmd_ops,
- struct trace_event_file *file,
- char *param,
- struct event_trigger_data *trigger_data);
- extern void event_trigger_reset_filter(struct event_command *cmd_ops,
- struct event_trigger_data *trigger_data);
- extern int event_trigger_register(struct event_command *cmd_ops,
- struct trace_event_file *file,
- char *glob,
- struct event_trigger_data *trigger_data);
- extern void event_trigger_unregister(struct event_command *cmd_ops,
- struct trace_event_file *file,
- char *glob,
- struct event_trigger_data *trigger_data);
- extern void event_file_get(struct trace_event_file *file);
- extern void event_file_put(struct trace_event_file *file);
- /**
- * struct event_trigger_ops - callbacks for trace event triggers
- *
- * The methods in this structure provide per-event trigger hooks for
- * various trigger operations.
- *
- * The @init and @free methods are used during trigger setup and
- * teardown, typically called from an event_command's @parse()
- * function implementation.
- *
- * The @print method is used to print the trigger spec.
- *
- * The @trigger method is the function that actually implements the
- * trigger and is called in the context of the triggering event
- * whenever that event occurs.
- *
- * All the methods below, except for @init() and @free(), must be
- * implemented.
- *
- * @trigger: The trigger 'probe' function called when the triggering
- * event occurs. The data passed into this callback is the data
- * that was supplied to the event_command @reg() function that
- * registered the trigger (see struct event_command) along with
- * the trace record, rec.
- *
- * @init: An optional initialization function called for the trigger
- * when the trigger is registered (via the event_command reg()
- * function). This can be used to perform per-trigger
- * initialization such as incrementing a per-trigger reference
- * count, for instance. This is usually implemented by the
- * generic utility function @event_trigger_init() (see
- * trace_event_triggers.c).
- *
- * @free: An optional de-initialization function called for the
- * trigger when the trigger is unregistered (via the
- * event_command @reg() function). This can be used to perform
- * per-trigger de-initialization such as decrementing a
- * per-trigger reference count and freeing corresponding trigger
- * data, for instance. This is usually implemented by the
- * generic utility function @event_trigger_free() (see
- * trace_event_triggers.c).
- *
- * @print: The callback function invoked to have the trigger print
- * itself. This is usually implemented by a wrapper function
- * that calls the generic utility function @event_trigger_print()
- * (see trace_event_triggers.c).
- */
- struct event_trigger_ops {
- void (*trigger)(struct event_trigger_data *data,
- struct trace_buffer *buffer,
- void *rec,
- struct ring_buffer_event *rbe);
- int (*init)(struct event_trigger_data *data);
- void (*free)(struct event_trigger_data *data);
- int (*print)(struct seq_file *m,
- struct event_trigger_data *data);
- };
- /**
- * struct event_command - callbacks and data members for event commands
- *
- * Event commands are invoked by users by writing the command name
- * into the 'trigger' file associated with a trace event. The
- * parameters associated with a specific invocation of an event
- * command are used to create an event trigger instance, which is
- * added to the list of trigger instances associated with that trace
- * event. When the event is hit, the set of triggers associated with
- * that event is invoked.
- *
- * The data members in this structure provide per-event command data
- * for various event commands.
- *
- * All the data members below, except for @post_trigger, must be set
- * for each event command.
- *
- * @name: The unique name that identifies the event command. This is
- * the name used when setting triggers via trigger files.
- *
- * @trigger_type: A unique id that identifies the event command
- * 'type'. This value has two purposes, the first to ensure that
- * only one trigger of the same type can be set at a given time
- * for a particular event e.g. it doesn't make sense to have both
- * a traceon and traceoff trigger attached to a single event at
- * the same time, so traceon and traceoff have the same type
- * though they have different names. The @trigger_type value is
- * also used as a bit value for deferring the actual trigger
- * action until after the current event is finished. Some
- * commands need to do this if they themselves log to the trace
- * buffer (see the @post_trigger() member below). @trigger_type
- * values are defined by adding new values to the trigger_type
- * enum in include/linux/trace_events.h.
- *
- * @flags: See the enum event_command_flags below.
- *
- * All the methods below, except for @set_filter() and @unreg_all(),
- * must be implemented.
- *
- * @parse: The callback function responsible for parsing and
- * registering the trigger written to the 'trigger' file by the
- * user. It allocates the trigger instance and registers it with
- * the appropriate trace event. It makes use of the other
- * event_command callback functions to orchestrate this, and is
- * usually implemented by the generic utility function
- * @event_trigger_callback() (see trace_event_triggers.c).
- *
- * @reg: Adds the trigger to the list of triggers associated with the
- * event, and enables the event trigger itself, after
- * initializing it (via the event_trigger_ops @init() function).
- * This is also where commands can use the @trigger_type value to
- * make the decision as to whether or not multiple instances of
- * the trigger should be allowed. This is usually implemented by
- * the generic utility function @register_trigger() (see
- * trace_event_triggers.c).
- *
- * @unreg: Removes the trigger from the list of triggers associated
- * with the event, and disables the event trigger itself, after
- * initializing it (via the event_trigger_ops @free() function).
- * This is usually implemented by the generic utility function
- * @unregister_trigger() (see trace_event_triggers.c).
- *
- * @unreg_all: An optional function called to remove all the triggers
- * from the list of triggers associated with the event. Called
- * when a trigger file is opened in truncate mode.
- *
- * @set_filter: An optional function called to parse and set a filter
- * for the trigger. If no @set_filter() method is set for the
- * event command, filters set by the user for the command will be
- * ignored. This is usually implemented by the generic utility
- * function @set_trigger_filter() (see trace_event_triggers.c).
- *
- * @get_trigger_ops: The callback function invoked to retrieve the
- * event_trigger_ops implementation associated with the command.
- * This callback function allows a single event_command to
- * support multiple trigger implementations via different sets of
- * event_trigger_ops, depending on the value of the @param
- * string.
- */
- struct event_command {
- struct list_head list;
- char *name;
- enum event_trigger_type trigger_type;
- int flags;
- int (*parse)(struct event_command *cmd_ops,
- struct trace_event_file *file,
- char *glob, char *cmd,
- char *param_and_filter);
- int (*reg)(char *glob,
- struct event_trigger_data *data,
- struct trace_event_file *file);
- void (*unreg)(char *glob,
- struct event_trigger_data *data,
- struct trace_event_file *file);
- void (*unreg_all)(struct trace_event_file *file);
- int (*set_filter)(char *filter_str,
- struct event_trigger_data *data,
- struct trace_event_file *file);
- struct event_trigger_ops *(*get_trigger_ops)(char *cmd, char *param);
- };
- /**
- * enum event_command_flags - flags for struct event_command
- *
- * @POST_TRIGGER: A flag that says whether or not this command needs
- * to have its action delayed until after the current event has
- * been closed. Some triggers need to avoid being invoked while
- * an event is currently in the process of being logged, since
- * the trigger may itself log data into the trace buffer. Thus
- * we make sure the current event is committed before invoking
- * those triggers. To do that, the trigger invocation is split
- * in two - the first part checks the filter using the current
- * trace record; if a command has the @post_trigger flag set, it
- * sets a bit for itself in the return value, otherwise it
- * directly invokes the trigger. Once all commands have been
- * either invoked or set their return flag, the current record is
- * either committed or discarded. At that point, if any commands
- * have deferred their triggers, those commands are finally
- * invoked following the close of the current event. In other
- * words, if the event_trigger_ops @func() probe implementation
- * itself logs to the trace buffer, this flag should be set,
- * otherwise it can be left unspecified.
- *
- * @NEEDS_REC: A flag that says whether or not this command needs
- * access to the trace record in order to perform its function,
- * regardless of whether or not it has a filter associated with
- * it (filters make a trigger require access to the trace record
- * but are not always present).
- */
- enum event_command_flags {
- EVENT_CMD_FL_POST_TRIGGER = 1,
- EVENT_CMD_FL_NEEDS_REC = 2,
- };
- static inline bool event_command_post_trigger(struct event_command *cmd_ops)
- {
- return cmd_ops->flags & EVENT_CMD_FL_POST_TRIGGER;
- }
- static inline bool event_command_needs_rec(struct event_command *cmd_ops)
- {
- return cmd_ops->flags & EVENT_CMD_FL_NEEDS_REC;
- }
- extern int trace_event_enable_disable(struct trace_event_file *file,
- int enable, int soft_disable);
- extern int tracing_alloc_snapshot(void);
- extern void tracing_snapshot_cond(struct trace_array *tr, void *cond_data);
- extern int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update);
- extern int tracing_snapshot_cond_disable(struct trace_array *tr);
- extern void *tracing_cond_snapshot_data(struct trace_array *tr);
- extern const char *__start___trace_bprintk_fmt[];
- extern const char *__stop___trace_bprintk_fmt[];
- extern const char *__start___tracepoint_str[];
- extern const char *__stop___tracepoint_str[];
- void trace_printk_control(bool enabled);
- void trace_printk_start_comm(void);
- int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
- int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
- /* Used from boot time tracer */
- extern int trace_set_options(struct trace_array *tr, char *option);
- extern int tracing_set_tracer(struct trace_array *tr, const char *buf);
- extern ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
- unsigned long size, int cpu_id);
- extern int tracing_set_cpumask(struct trace_array *tr,
- cpumask_var_t tracing_cpumask_new);
- #define MAX_EVENT_NAME_LEN 64
- extern ssize_t trace_parse_run_command(struct file *file,
- const char __user *buffer, size_t count, loff_t *ppos,
- int (*createfn)(const char *));
- extern unsigned int err_pos(char *cmd, const char *str);
- extern void tracing_log_err(struct trace_array *tr,
- const char *loc, const char *cmd,
- const char **errs, u8 type, u16 pos);
- /*
- * Normal trace_printk() and friends allocates special buffers
- * to do the manipulation, as well as saves the print formats
- * into sections to display. But the trace infrastructure wants
- * to use these without the added overhead at the price of being
- * a bit slower (used mainly for warnings, where we don't care
- * about performance). The internal_trace_puts() is for such
- * a purpose.
- */
- #define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str))
- #undef FTRACE_ENTRY
- #define FTRACE_ENTRY(call, struct_name, id, tstruct, print) \
- extern struct trace_event_call \
- __aligned(4) event_##call;
- #undef FTRACE_ENTRY_DUP
- #define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print) \
- FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
- #undef FTRACE_ENTRY_PACKED
- #define FTRACE_ENTRY_PACKED(call, struct_name, id, tstruct, print) \
- FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
- #include "trace_entries.h"
- #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER)
- int perf_ftrace_event_register(struct trace_event_call *call,
- enum trace_reg type, void *data);
- #else
- #define perf_ftrace_event_register NULL
- #endif
- #ifdef CONFIG_FTRACE_SYSCALLS
- void init_ftrace_syscalls(void);
- const char *get_syscall_name(int syscall);
- #else
- static inline void init_ftrace_syscalls(void) { }
- static inline const char *get_syscall_name(int syscall)
- {
- return NULL;
- }
- #endif
- #ifdef CONFIG_EVENT_TRACING
- void trace_event_init(void);
- void trace_event_eval_update(struct trace_eval_map **map, int len);
- /* Used from boot time tracer */
- extern int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set);
- extern int trigger_process_regex(struct trace_event_file *file, char *buff);
- #else
- static inline void __init trace_event_init(void) { }
- static inline void trace_event_eval_update(struct trace_eval_map **map, int len) { }
- #endif
- #ifdef CONFIG_TRACER_SNAPSHOT
- void tracing_snapshot_instance(struct trace_array *tr);
- int tracing_alloc_snapshot_instance(struct trace_array *tr);
- int tracing_arm_snapshot(struct trace_array *tr);
- void tracing_disarm_snapshot(struct trace_array *tr);
- #else
- static inline void tracing_snapshot_instance(struct trace_array *tr) { }
- static inline int tracing_alloc_snapshot_instance(struct trace_array *tr)
- {
- return 0;
- }
- static inline int tracing_arm_snapshot(struct trace_array *tr) { return 0; }
- static inline void tracing_disarm_snapshot(struct trace_array *tr) { }
- #endif
- #ifdef CONFIG_PREEMPT_TRACER
- void tracer_preempt_on(unsigned long a0, unsigned long a1);
- void tracer_preempt_off(unsigned long a0, unsigned long a1);
- #else
- static inline void tracer_preempt_on(unsigned long a0, unsigned long a1) { }
- static inline void tracer_preempt_off(unsigned long a0, unsigned long a1) { }
- #endif
- #ifdef CONFIG_IRQSOFF_TRACER
- void tracer_hardirqs_on(unsigned long a0, unsigned long a1);
- void tracer_hardirqs_off(unsigned long a0, unsigned long a1);
- #else
- static inline void tracer_hardirqs_on(unsigned long a0, unsigned long a1) { }
- static inline void tracer_hardirqs_off(unsigned long a0, unsigned long a1) { }
- #endif
- /*
- * Reset the state of the trace_iterator so that it can read consumed data.
- * Normally, the trace_iterator is used for reading the data when it is not
- * consumed, and must retain state.
- */
- static __always_inline void trace_iterator_reset(struct trace_iterator *iter)
- {
- memset_startat(iter, 0, seq);
- iter->pos = -1;
- }
- /* Check the name is good for event/group/fields */
- static inline bool __is_good_name(const char *name, bool hash_ok)
- {
- if (!isalpha(*name) && *name != '_' && (!hash_ok || *name != '-'))
- return false;
- while (*++name != '\0') {
- if (!isalpha(*name) && !isdigit(*name) && *name != '_' &&
- (!hash_ok || *name != '-'))
- return false;
- }
- return true;
- }
- /* Check the name is good for event/group/fields */
- static inline bool is_good_name(const char *name)
- {
- return __is_good_name(name, false);
- }
- /* Check the name is good for system */
- static inline bool is_good_system_name(const char *name)
- {
- return __is_good_name(name, true);
- }
- /* Convert certain expected symbols into '_' when generating event names */
- static inline void sanitize_event_name(char *name)
- {
- while (*name++ != '\0')
- if (*name == ':' || *name == '.' || *name == '*')
- *name = '_';
- }
- /*
- * This is a generic way to read and write a u64 value from a file in tracefs.
- *
- * The value is stored on the variable pointed by *val. The value needs
- * to be at least *min and at most *max. The write is protected by an
- * existing *lock.
- */
- struct trace_min_max_param {
- struct mutex *lock;
- u64 *val;
- u64 *min;
- u64 *max;
- };
- #define U64_STR_SIZE 24 /* 20 digits max */
- extern const struct file_operations trace_min_max_fops;
- #ifdef CONFIG_RV
- extern int rv_init_interface(void);
- #else
- static inline int rv_init_interface(void)
- {
- return 0;
- }
- #endif
- /*
- * This is used only to distinguish
- * function address from trampoline code.
- * So this value has no meaning.
- */
- #define FTRACE_TRAMPOLINE_MARKER ((unsigned long) INT_MAX)
- #endif /* _LINUX_KERNEL_TRACE_H */
|