sched.h

  1. #undef TRACE_SYSTEM
  2. #define TRACE_SYSTEM sched
  3. #if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
  4. #define _TRACE_SCHED_H
  5. #include <linux/sched.h>
  6. #include <linux/tracepoint.h>
  7. #include <linux/binfmts.h>
  8. struct rq;
  9. /*
  10. * Tracepoint for calling kthread_stop, performed to end a kthread:
  11. */
  12. TRACE_EVENT(sched_kthread_stop,
  13. TP_PROTO(struct task_struct *t),
  14. TP_ARGS(t),
  15. TP_STRUCT__entry(
  16. __array( char, comm, TASK_COMM_LEN )
  17. __field( pid_t, pid )
  18. ),
  19. TP_fast_assign(
  20. memcpy(__entry->comm, t->comm, TASK_COMM_LEN);
  21. __entry->pid = t->pid;
  22. ),
  23. TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
  24. );
  25. /*
  26. * Tracepoint for the return value of the kthread stopping:
  27. */
  28. TRACE_EVENT(sched_kthread_stop_ret,
  29. TP_PROTO(int ret),
  30. TP_ARGS(ret),
  31. TP_STRUCT__entry(
  32. __field( int, ret )
  33. ),
  34. TP_fast_assign(
  35. __entry->ret = ret;
  36. ),
  37. TP_printk("ret=%d", __entry->ret)
  38. );
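/*
 * A minimal usage sketch (assuming the mainline kthread_stop() call sites,
 * which are not part of this header): the two events above bracket stopping
 * a kthread, roughly as:
 *
 *	trace_sched_kthread_stop(k);		// k is the kthread being stopped
 *	// ... wake the thread and wait for it to exit ...
 *	trace_sched_kthread_stop_ret(ret);	// ret is kthread_stop()'s result
 */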
  39. /*
  40. * Tracepoint for task enqueue/dequeue:
  41. */
  42. TRACE_EVENT(sched_enq_deq_task,
  43. TP_PROTO(struct task_struct *p, bool enqueue, unsigned int cpus_allowed),
  44. TP_ARGS(p, enqueue, cpus_allowed),
  45. TP_STRUCT__entry(
  46. __array( char, comm, TASK_COMM_LEN )
  47. __field( pid_t, pid )
  48. __field( int, prio )
  49. __field( int, cpu )
  50. __field( bool, enqueue )
  51. __field(unsigned int, nr_running )
  52. __field(unsigned long, cpu_load )
  53. __field(unsigned int, rt_nr_running )
  54. __field(unsigned int, cpus_allowed )
  55. __field(unsigned int, demand )
  56. __field(unsigned int, pred_demand )
  57. ),
  58. TP_fast_assign(
  59. memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
  60. __entry->pid = p->pid;
  61. __entry->prio = p->prio;
  62. __entry->cpu = task_cpu(p);
  63. __entry->enqueue = enqueue;
  64. __entry->nr_running = task_rq(p)->nr_running;
  65. __entry->cpu_load = task_rq(p)->cpu_load[0];
  66. __entry->rt_nr_running = task_rq(p)->rt.rt_nr_running;
  67. __entry->cpus_allowed = cpus_allowed;
  68. __entry->demand = task_load(p);
  69. __entry->pred_demand = task_pl(p);
  70. ),
  71. TP_printk("cpu=%d %s comm=%s pid=%d prio=%d nr_running=%u cpu_load=%lu rt_nr_running=%u affine=%x demand=%u pred_demand=%u",
  72. __entry->cpu,
  73. __entry->enqueue ? "enqueue" : "dequeue",
  74. __entry->comm, __entry->pid,
  75. __entry->prio, __entry->nr_running,
  76. __entry->cpu_load, __entry->rt_nr_running, __entry->cpus_allowed
  77. , __entry->demand, __entry->pred_demand
  78. )
  79. );
  80. #ifdef CONFIG_SCHED_WALT
  81. struct group_cpu_time;
  82. extern const char *task_event_names[];
  83. #if defined(CREATE_TRACE_POINTS) && defined(CONFIG_SCHED_WALT)
  84. static inline void __window_data(u32 *dst, u32 *src)
  85. {
  86. if (src)
  87. memcpy(dst, src, nr_cpu_ids * sizeof(u32));
  88. else
  89. memset(dst, 0, nr_cpu_ids * sizeof(u32));
  90. }
  91. struct trace_seq;
  92. const char *__window_print(struct trace_seq *p, const u32 *buf, int buf_len)
  93. {
  94. int i;
  95. const char *ret = p->buffer + seq_buf_used(&p->seq);
  96. for (i = 0; i < buf_len; i++)
  97. trace_seq_printf(p, "%u ", buf[i]);
  98. trace_seq_putc(p, 0);
  99. return ret;
  100. }
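/*
 * Example of what __window_print() produces for the dynamic arrays filled by
 * __window_data(): one value per possible CPU, space separated, e.g.
 * "1024 512 0 0 " on a 4-CPU system (all zeros when the source array is NULL).
 */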
  101. static inline s64 __rq_update_sum(struct rq *rq, bool curr, bool new)
  102. {
  103. if (curr)
  104. if (new)
  105. return rq->nt_curr_runnable_sum;
  106. else
  107. return rq->curr_runnable_sum;
  108. else
  109. if (new)
  110. return rq->nt_prev_runnable_sum;
  111. else
  112. return rq->prev_runnable_sum;
  113. }
  114. static inline s64 __grp_update_sum(struct rq *rq, bool curr, bool new)
  115. {
  116. if (curr)
  117. if (new)
  118. return rq->grp_time.nt_curr_runnable_sum;
  119. else
  120. return rq->grp_time.curr_runnable_sum;
  121. else
  122. if (new)
  123. return rq->grp_time.nt_prev_runnable_sum;
  124. else
  125. return rq->grp_time.prev_runnable_sum;
  126. }
  127. static inline s64
  128. __get_update_sum(struct rq *rq, enum migrate_types migrate_type,
  129. bool src, bool new, bool curr)
  130. {
  131. switch (migrate_type) {
  132. case RQ_TO_GROUP:
  133. if (src)
  134. return __rq_update_sum(rq, curr, new);
  135. else
  136. return __grp_update_sum(rq, curr, new);
  137. case GROUP_TO_RQ:
  138. if (src)
  139. return __grp_update_sum(rq, curr, new);
  140. else
  141. return __rq_update_sum(rq, curr, new);
  142. default:
  143. WARN_ON_ONCE(1);
  144. return -1;
  145. }
  146. }
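/*
 * Selection logic sketch for __get_update_sum(): RQ_TO_GROUP takes the "src"
 * counters from the rq and the "dst" counters from rq->grp_time, while
 * GROUP_TO_RQ swaps the two; the "new" flag picks the nt_* sums and "curr"
 * picks the current window over the previous one.
 */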
  147. #endif
  148. TRACE_EVENT(sched_update_pred_demand,
  149. TP_PROTO(struct rq *rq, struct task_struct *p, u32 runtime, int pct,
  150. unsigned int pred_demand),
  151. TP_ARGS(rq, p, runtime, pct, pred_demand),
  152. TP_STRUCT__entry(
  153. __array( char, comm, TASK_COMM_LEN )
  154. __field( pid_t, pid )
  155. __field(unsigned int, runtime )
  156. __field( int, pct )
  157. __field(unsigned int, pred_demand )
  158. __array( u8, bucket, NUM_BUSY_BUCKETS)
  159. __field( int, cpu )
  160. ),
  161. TP_fast_assign(
  162. memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
  163. __entry->pid = p->pid;
  164. __entry->runtime = runtime;
  165. __entry->pct = pct;
  166. __entry->pred_demand = pred_demand;
  167. memcpy(__entry->bucket, p->ravg.busy_buckets,
  168. NUM_BUSY_BUCKETS * sizeof(u8));
  169. __entry->cpu = rq->cpu;
  170. ),
  171. TP_printk("%d (%s): runtime %u pct %d cpu %d pred_demand %u (buckets: %u %u %u %u %u %u %u %u %u %u)",
  172. __entry->pid, __entry->comm,
  173. __entry->runtime, __entry->pct, __entry->cpu,
  174. __entry->pred_demand, __entry->bucket[0], __entry->bucket[1],
  175. __entry->bucket[2], __entry->bucket[3],__entry->bucket[4],
  176. __entry->bucket[5], __entry->bucket[6], __entry->bucket[7],
  177. __entry->bucket[8], __entry->bucket[9])
  178. );
  179. TRACE_EVENT(sched_update_history,
  180. TP_PROTO(struct rq *rq, struct task_struct *p, u32 runtime, int samples,
  181. enum task_event evt),
  182. TP_ARGS(rq, p, runtime, samples, evt),
  183. TP_STRUCT__entry(
  184. __array( char, comm, TASK_COMM_LEN )
  185. __field( pid_t, pid )
  186. __field(unsigned int, runtime )
  187. __field( int, samples )
  188. __field(enum task_event, evt )
  189. __field(unsigned int, demand )
  190. __field(unsigned int, coloc_demand )
  191. __field(unsigned int, pred_demand )
  192. __array( u32, hist, RAVG_HIST_SIZE_MAX)
  193. __field(unsigned int, nr_big_tasks )
  194. __field( int, cpu )
  195. ),
  196. TP_fast_assign(
  197. memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
  198. __entry->pid = p->pid;
  199. __entry->runtime = runtime;
  200. __entry->samples = samples;
  201. __entry->evt = evt;
  202. __entry->demand = p->ravg.demand;
  203. __entry->coloc_demand = p->ravg.coloc_demand;
  204. __entry->pred_demand = p->ravg.pred_demand;
  205. memcpy(__entry->hist, p->ravg.sum_history,
  206. RAVG_HIST_SIZE_MAX * sizeof(u32));
  207. __entry->nr_big_tasks = rq->walt_stats.nr_big_tasks;
  208. __entry->cpu = rq->cpu;
  209. ),
  210. TP_printk("%d (%s): runtime %u samples %d event %s demand %u coloc_demand %u pred_demand %u"
  211. " (hist: %u %u %u %u %u) cpu %d nr_big %u",
  212. __entry->pid, __entry->comm,
  213. __entry->runtime, __entry->samples,
  214. task_event_names[__entry->evt],
  215. __entry->demand, __entry->coloc_demand, __entry->pred_demand,
  216. __entry->hist[0], __entry->hist[1],
  217. __entry->hist[2], __entry->hist[3],
  218. __entry->hist[4], __entry->cpu, __entry->nr_big_tasks)
  219. );
  220. TRACE_EVENT(sched_get_task_cpu_cycles,
  221. TP_PROTO(int cpu, int event, u64 cycles, u64 exec_time, struct task_struct *p),
  222. TP_ARGS(cpu, event, cycles, exec_time, p),
  223. TP_STRUCT__entry(
  224. __field(int, cpu )
  225. __field(int, event )
  226. __field(u64, cycles )
  227. __field(u64, exec_time )
  228. __field(u32, freq )
  229. __field(u32, legacy_freq )
  230. __field(u32, max_freq )
  231. __field(pid_t, pid )
  232. __array(char, comm, TASK_COMM_LEN )
  233. ),
  234. TP_fast_assign(
  235. __entry->cpu = cpu;
  236. __entry->event = event;
  237. __entry->cycles = cycles;
  238. __entry->exec_time = exec_time;
  239. __entry->freq = cpu_cycles_to_freq(cycles, exec_time);
  240. __entry->legacy_freq = cpu_cur_freq(cpu);
  241. __entry->max_freq = cpu_max_freq(cpu);
  242. __entry->pid = p->pid;
  243. memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
  244. ),
  245. TP_printk("cpu=%d event=%d cycles=%llu exec_time=%llu freq=%u legacy_freq=%u max_freq=%u task=%d (%s)",
  246. __entry->cpu, __entry->event, __entry->cycles,
  247. __entry->exec_time, __entry->freq, __entry->legacy_freq,
  248. __entry->max_freq, __entry->pid, __entry->comm)
  249. );
  250. TRACE_EVENT(sched_update_task_ravg,
  251. TP_PROTO(struct task_struct *p, struct rq *rq, enum task_event evt,
  252. u64 wallclock, u64 irqtime, u64 cycles, u64 exec_time,
  253. struct group_cpu_time *cpu_time),
  254. TP_ARGS(p, rq, evt, wallclock, irqtime, cycles, exec_time, cpu_time),
  255. TP_STRUCT__entry(
  256. __array( char, comm, TASK_COMM_LEN )
  257. __field( pid_t, pid )
  258. __field( pid_t, cur_pid )
  259. __field(unsigned int, cur_freq )
  260. __field( u64, wallclock )
  261. __field( u64, mark_start )
  262. __field( u64, delta_m )
  263. __field( u64, win_start )
  264. __field( u64, delta )
  265. __field( u64, irqtime )
  266. __field(enum task_event, evt )
  267. __field(unsigned int, demand )
  268. __field(unsigned int, coloc_demand )
  269. __field(unsigned int, sum )
  270. __field( int, cpu )
  271. __field(unsigned int, pred_demand )
  272. __field( u64, rq_cs )
  273. __field( u64, rq_ps )
  274. __field( u64, grp_cs )
  275. __field( u64, grp_ps )
  276. __field( u64, grp_nt_cs )
  277. __field( u64, grp_nt_ps )
  278. __field( u32, curr_window )
  279. __field( u32, prev_window )
  280. __dynamic_array(u32, curr_sum, nr_cpu_ids )
  281. __dynamic_array(u32, prev_sum, nr_cpu_ids )
  282. __field( u64, nt_cs )
  283. __field( u64, nt_ps )
  284. __field( u32, active_windows )
  285. __field( u8, curr_top )
  286. __field( u8, prev_top )
  287. ),
  288. TP_fast_assign(
  289. __entry->wallclock = wallclock;
  290. __entry->win_start = rq->window_start;
  291. __entry->delta = (wallclock - rq->window_start);
  292. __entry->evt = evt;
  293. __entry->cpu = rq->cpu;
  294. __entry->cur_pid = rq->curr->pid;
  295. __entry->cur_freq = cpu_cycles_to_freq(cycles, exec_time);
  296. memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
  297. __entry->pid = p->pid;
  298. __entry->mark_start = p->ravg.mark_start;
  299. __entry->delta_m = (wallclock - p->ravg.mark_start);
  300. __entry->demand = p->ravg.demand;
  301. __entry->coloc_demand = p->ravg.coloc_demand;
  302. __entry->sum = p->ravg.sum;
  303. __entry->irqtime = irqtime;
  304. __entry->pred_demand = p->ravg.pred_demand;
  305. __entry->rq_cs = rq->curr_runnable_sum;
  306. __entry->rq_ps = rq->prev_runnable_sum;
  307. __entry->grp_cs = cpu_time ? cpu_time->curr_runnable_sum : 0;
  308. __entry->grp_ps = cpu_time ? cpu_time->prev_runnable_sum : 0;
  309. __entry->grp_nt_cs = cpu_time ? cpu_time->nt_curr_runnable_sum : 0;
  310. __entry->grp_nt_ps = cpu_time ? cpu_time->nt_prev_runnable_sum : 0;
  311. __entry->curr_window = p->ravg.curr_window;
  312. __entry->prev_window = p->ravg.prev_window;
  313. __window_data(__get_dynamic_array(curr_sum), p->ravg.curr_window_cpu);
  314. __window_data(__get_dynamic_array(prev_sum), p->ravg.prev_window_cpu);
  315. __entry->nt_cs = rq->nt_curr_runnable_sum;
  316. __entry->nt_ps = rq->nt_prev_runnable_sum;
  317. __entry->active_windows = p->ravg.active_windows;
  318. __entry->curr_top = rq->curr_top;
  319. __entry->prev_top = rq->prev_top;
  320. ),
  321. TP_printk("wc %llu ws %llu delta %llu event %s cpu %d cur_freq %u cur_pid %d task %d (%s) ms %llu delta %llu demand %u coloc_demand: %u sum %u irqtime %llu pred_demand %u rq_cs %llu rq_ps %llu cur_window %u (%s) prev_window %u (%s) nt_cs %llu nt_ps %llu active_wins %u grp_cs %lld grp_ps %lld, grp_nt_cs %llu, grp_nt_ps: %llu curr_top %u prev_top %u",
  322. __entry->wallclock, __entry->win_start, __entry->delta,
  323. task_event_names[__entry->evt], __entry->cpu,
  324. __entry->cur_freq, __entry->cur_pid,
  325. __entry->pid, __entry->comm, __entry->mark_start,
  326. __entry->delta_m, __entry->demand, __entry->coloc_demand,
  327. __entry->sum, __entry->irqtime, __entry->pred_demand,
  328. __entry->rq_cs, __entry->rq_ps, __entry->curr_window,
  329. __window_print(p, __get_dynamic_array(curr_sum), nr_cpu_ids),
  330. __entry->prev_window,
  331. __window_print(p, __get_dynamic_array(prev_sum), nr_cpu_ids),
  332. __entry->nt_cs, __entry->nt_ps,
  333. __entry->active_windows, __entry->grp_cs,
  334. __entry->grp_ps, __entry->grp_nt_cs, __entry->grp_nt_ps,
  335. __entry->curr_top, __entry->prev_top)
  336. );
  337. TRACE_EVENT(sched_update_task_ravg_mini,
  338. TP_PROTO(struct task_struct *p, struct rq *rq, enum task_event evt,
  339. u64 wallclock, u64 irqtime, u64 cycles, u64 exec_time,
  340. struct group_cpu_time *cpu_time),
  341. TP_ARGS(p, rq, evt, wallclock, irqtime, cycles, exec_time, cpu_time),
  342. TP_STRUCT__entry(
  343. __array( char, comm, TASK_COMM_LEN )
  344. __field( pid_t, pid )
  345. __field( u64, wallclock )
  346. __field( u64, mark_start )
  347. __field( u64, delta_m )
  348. __field( u64, win_start )
  349. __field( u64, delta )
  350. __field(enum task_event, evt )
  351. __field(unsigned int, demand )
  352. __field( int, cpu )
  353. __field( u64, rq_cs )
  354. __field( u64, rq_ps )
  355. __field( u64, grp_cs )
  356. __field( u64, grp_ps )
  357. __field( u32, curr_window )
  358. __field( u32, prev_window )
  359. ),
  360. TP_fast_assign(
  361. __entry->wallclock = wallclock;
  362. __entry->win_start = rq->window_start;
  363. __entry->delta = (wallclock - rq->window_start);
  364. __entry->evt = evt;
  365. __entry->cpu = rq->cpu;
  366. memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
  367. __entry->pid = p->pid;
  368. __entry->mark_start = p->ravg.mark_start;
  369. __entry->delta_m = (wallclock - p->ravg.mark_start);
  370. __entry->demand = p->ravg.demand;
  371. __entry->rq_cs = rq->curr_runnable_sum;
  372. __entry->rq_ps = rq->prev_runnable_sum;
  373. __entry->grp_cs = cpu_time ? cpu_time->curr_runnable_sum : 0;
  374. __entry->grp_ps = cpu_time ? cpu_time->prev_runnable_sum : 0;
  375. __entry->curr_window = p->ravg.curr_window;
  376. __entry->prev_window = p->ravg.prev_window;
  377. ),
  378. TP_printk("wc %llu ws %llu delta %llu event %s cpu %d task %d (%s) ms %llu delta %llu demand %u rq_cs %llu rq_ps %llu cur_window %u prev_window %u grp_cs %lld grp_ps %lld",
  379. __entry->wallclock, __entry->win_start, __entry->delta,
  380. task_event_names[__entry->evt], __entry->cpu,
  381. __entry->pid, __entry->comm, __entry->mark_start,
  382. __entry->delta_m, __entry->demand,
  383. __entry->rq_cs, __entry->rq_ps, __entry->curr_window,
  384. __entry->prev_window,
  385. __entry->grp_cs,
  386. __entry->grp_ps)
  387. );
  388. struct migration_sum_data;
  389. extern const char *migrate_type_names[];
  390. TRACE_EVENT(sched_set_preferred_cluster,
  391. TP_PROTO(struct related_thread_group *grp, u64 total_demand),
  392. TP_ARGS(grp, total_demand),
  393. TP_STRUCT__entry(
  394. __field( int, id )
  395. __field( u64, demand )
  396. __field( int, cluster_first_cpu )
  397. __array( char, comm, TASK_COMM_LEN )
  398. __field( pid_t, pid )
  399. __field(unsigned int, task_demand )
  400. ),
  401. TP_fast_assign(
  402. __entry->id = grp->id;
  403. __entry->demand = total_demand;
  404. __entry->cluster_first_cpu = grp->preferred_cluster ?
  405. cluster_first_cpu(grp->preferred_cluster)
  406. : -1;
  407. ),
  408. TP_printk("group_id %d total_demand %llu preferred_cluster_first_cpu %d",
  409. __entry->id, __entry->demand,
  410. __entry->cluster_first_cpu)
  411. );
  412. TRACE_EVENT(sched_migration_update_sum,
  413. TP_PROTO(struct task_struct *p, enum migrate_types migrate_type, struct rq *rq),
  414. TP_ARGS(p, migrate_type, rq),
  415. TP_STRUCT__entry(
  416. __field(int, tcpu )
  417. __field(int, pid )
  418. __field(enum migrate_types, migrate_type )
  419. __field( s64, src_cs )
  420. __field( s64, src_ps )
  421. __field( s64, dst_cs )
  422. __field( s64, dst_ps )
  423. __field( s64, src_nt_cs )
  424. __field( s64, src_nt_ps )
  425. __field( s64, dst_nt_cs )
  426. __field( s64, dst_nt_ps )
  427. ),
  428. TP_fast_assign(
  429. __entry->tcpu = task_cpu(p);
  430. __entry->pid = p->pid;
  431. __entry->migrate_type = migrate_type;
  432. __entry->src_cs = __get_update_sum(rq, migrate_type,
  433. true, false, true);
  434. __entry->src_ps = __get_update_sum(rq, migrate_type,
  435. true, false, false);
  436. __entry->dst_cs = __get_update_sum(rq, migrate_type,
  437. false, false, true);
  438. __entry->dst_ps = __get_update_sum(rq, migrate_type,
  439. false, false, false);
  440. __entry->src_nt_cs = __get_update_sum(rq, migrate_type,
  441. true, true, true);
  442. __entry->src_nt_ps = __get_update_sum(rq, migrate_type,
  443. true, true, false);
  444. __entry->dst_nt_cs = __get_update_sum(rq, migrate_type,
  445. false, true, true);
  446. __entry->dst_nt_ps = __get_update_sum(rq, migrate_type,
  447. false, true, false);
  448. ),
  449. TP_printk("pid %d task_cpu %d migrate_type %s src_cs %llu src_ps %llu dst_cs %lld dst_ps %lld src_nt_cs %llu src_nt_ps %llu dst_nt_cs %lld dst_nt_ps %lld",
  450. __entry->pid, __entry->tcpu, migrate_type_names[__entry->migrate_type],
  451. __entry->src_cs, __entry->src_ps, __entry->dst_cs, __entry->dst_ps,
  452. __entry->src_nt_cs, __entry->src_nt_ps, __entry->dst_nt_cs, __entry->dst_nt_ps)
  453. );
  454. TRACE_EVENT(sched_set_boost,
  455. TP_PROTO(int type),
  456. TP_ARGS(type),
  457. TP_STRUCT__entry(
  458. __field(int, type )
  459. ),
  460. TP_fast_assign(
  461. __entry->type = type;
  462. ),
  463. TP_printk("type %d", __entry->type)
  464. );
  465. #endif
  466. #ifdef CONFIG_SCHED_WALT
  467. DECLARE_EVENT_CLASS(sched_cpu_load,
  468. TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost),
  469. TP_ARGS(rq, idle, irqload, power_cost),
  470. TP_STRUCT__entry(
  471. __field(unsigned int, cpu )
  472. __field(unsigned int, idle )
  473. __field(unsigned int, nr_running )
  474. __field(unsigned int, nr_big_tasks )
  475. __field(unsigned int, load_scale_factor )
  476. __field(unsigned int, capacity )
  477. __field( u64, cumulative_runnable_avg )
  478. __field( u64, irqload )
  479. __field(unsigned int, max_freq )
  480. __field(unsigned int, power_cost )
  481. __field( int, cstate )
  482. __field( int, dstate )
  483. ),
  484. TP_fast_assign(
  485. __entry->cpu = rq->cpu;
  486. __entry->idle = idle;
  487. __entry->nr_running = rq->nr_running;
  488. __entry->nr_big_tasks = rq->walt_stats.nr_big_tasks;
  489. __entry->load_scale_factor = cpu_load_scale_factor(rq->cpu);
  490. __entry->capacity = cpu_capacity(rq->cpu);
  491. __entry->cumulative_runnable_avg = rq->walt_stats.cumulative_runnable_avg;
  492. __entry->irqload = irqload;
  493. __entry->max_freq = cpu_max_freq(rq->cpu);
  494. __entry->power_cost = power_cost;
  495. __entry->cstate = rq->cstate;
  496. __entry->dstate = rq->cluster->dstate;
  497. ),
  498. TP_printk("cpu %u idle %d nr_run %u nr_big %u lsf %u capacity %u cr_avg %llu irqload %llu fmax %u power_cost %u cstate %d dstate %d",
  499. __entry->cpu, __entry->idle, __entry->nr_running, __entry->nr_big_tasks,
  500. __entry->load_scale_factor, __entry->capacity,
  501. __entry->cumulative_runnable_avg, __entry->irqload,
  502. __entry->max_freq, __entry->power_cost, __entry->cstate,
  503. __entry->dstate)
  504. );
  505. DEFINE_EVENT(sched_cpu_load, sched_cpu_load_lb,
  506. TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost),
  507. TP_ARGS(rq, idle, irqload, power_cost)
  508. );
  509. TRACE_EVENT(sched_load_to_gov,
  510. TP_PROTO(struct rq *rq, u64 aggr_grp_load, u32 tt_load, u64 freq_aggr_thresh, u64 load, int policy, int big_task_rotation),
  511. TP_ARGS(rq, aggr_grp_load, tt_load, freq_aggr_thresh, load, policy, big_task_rotation),
  512. TP_STRUCT__entry(
  513. __field( int, cpu )
  514. __field( int, policy )
  515. __field( int, ed_task_pid )
  516. __field( u64, aggr_grp_load )
  517. __field( u64, freq_aggr_thresh )
  518. __field( u64, tt_load )
  519. __field( u64, rq_ps )
  520. __field( u64, grp_rq_ps )
  521. __field( u64, nt_ps )
  522. __field( u64, grp_nt_ps )
  523. __field( u64, pl )
  524. __field( u64, load )
  525. __field( int, big_task_rotation )
  526. ),
  527. TP_fast_assign(
  528. __entry->cpu = cpu_of(rq);
  529. __entry->policy = policy;
  530. __entry->ed_task_pid = rq->ed_task ? rq->ed_task->pid : -1;
  531. __entry->aggr_grp_load = aggr_grp_load;
  532. __entry->freq_aggr_thresh = freq_aggr_thresh;
  533. __entry->tt_load = tt_load;
  534. __entry->rq_ps = rq->prev_runnable_sum;
  535. __entry->grp_rq_ps = rq->grp_time.prev_runnable_sum;
  536. __entry->nt_ps = rq->nt_prev_runnable_sum;
  537. __entry->grp_nt_ps = rq->grp_time.nt_prev_runnable_sum;
  538. __entry->pl = rq->walt_stats.pred_demands_sum;
  539. __entry->load = load;
  540. __entry->big_task_rotation = big_task_rotation;
  541. ),
  542. TP_printk("cpu=%d policy=%d ed_task_pid=%d aggr_grp_load=%llu freq_aggr_thresh=%llu tt_load=%llu rq_ps=%llu grp_rq_ps=%llu nt_ps=%llu grp_nt_ps=%llu pl=%llu load=%llu big_task_rotation=%d",
  543. __entry->cpu, __entry->policy, __entry->ed_task_pid,
  544. __entry->aggr_grp_load, __entry->freq_aggr_thresh,
  545. __entry->tt_load, __entry->rq_ps, __entry->grp_rq_ps,
  546. __entry->nt_ps, __entry->grp_nt_ps, __entry->pl, __entry->load,
  547. __entry->big_task_rotation)
  548. );
  549. #endif
  550. #ifdef CONFIG_SMP
  551. TRACE_EVENT(sched_cpu_util,
  552. TP_PROTO(int cpu),
  553. TP_ARGS(cpu),
  554. TP_STRUCT__entry(
  555. __field(unsigned int, cpu )
  556. __field(unsigned int, nr_running )
  557. __field(long, cpu_util )
  558. __field(long, cpu_util_cum )
  559. __field(unsigned int, capacity_curr )
  560. __field(unsigned int, capacity )
  561. __field(unsigned int, capacity_orig )
  562. __field(int, idle_state )
  563. __field(u64, irqload )
  564. ),
  565. TP_fast_assign(
  566. __entry->cpu = cpu;
  567. __entry->nr_running = cpu_rq(cpu)->nr_running;
  568. __entry->cpu_util = cpu_util(cpu);
  569. __entry->cpu_util_cum = cpu_util_cum(cpu, 0);
  570. __entry->capacity_curr = capacity_curr_of(cpu);
  571. __entry->capacity = capacity_of(cpu);
  572. __entry->capacity_orig = capacity_orig_of(cpu);
  573. __entry->idle_state = idle_get_state_idx(cpu_rq(cpu));
  574. __entry->irqload = sched_irqload(cpu);
  575. ),
  576. TP_printk("cpu=%d nr_running=%d cpu_util=%ld cpu_util_cum=%ld capacity_curr=%u capacity=%u capacity_orig=%u idle_state=%d irqload=%llu",
  577. __entry->cpu, __entry->nr_running, __entry->cpu_util, __entry->cpu_util_cum, __entry->capacity_curr, __entry->capacity, __entry->capacity_orig, __entry->idle_state, __entry->irqload)
  578. );
  579. TRACE_EVENT(sched_energy_diff,
  580. TP_PROTO(struct task_struct *p, int prev_cpu, unsigned int prev_energy,
  581. int next_cpu, unsigned int next_energy,
  582. int backup_cpu, unsigned int backup_energy),
  583. TP_ARGS(p, prev_cpu, prev_energy, next_cpu, next_energy,
  584. backup_cpu, backup_energy),
  585. TP_STRUCT__entry(
  586. __field(int, pid )
  587. __field(int, prev_cpu )
  588. __field(int, prev_energy )
  589. __field(int, next_cpu )
  590. __field(int, next_energy )
  591. __field(int, backup_cpu )
  592. __field(int, backup_energy )
  593. ),
  594. TP_fast_assign(
  595. __entry->pid = p->pid;
  596. __entry->prev_cpu = prev_cpu;
  597. __entry->prev_energy = prev_energy;
  598. __entry->next_cpu = next_cpu;
  599. __entry->next_energy = next_energy;
  600. __entry->backup_cpu = backup_cpu;
  601. __entry->backup_energy = backup_energy;
  602. ),
  603. TP_printk("pid=%d prev_cpu=%d prev_energy=%u next_cpu=%d next_energy=%u backup_cpu=%d backup_energy=%u",
  604. __entry->pid, __entry->prev_cpu, __entry->prev_energy,
  605. __entry->next_cpu, __entry->next_energy,
  606. __entry->backup_cpu, __entry->backup_energy)
  607. );
  608. TRACE_EVENT(sched_task_util,
  609. TP_PROTO(struct task_struct *p, int next_cpu, int backup_cpu,
  610. int target_cpu, bool need_idle, int fastpath,
  611. bool placement_boost, int rtg_cpu, u64 start_t),
  612. TP_ARGS(p, next_cpu, backup_cpu, target_cpu, need_idle, fastpath,
  613. placement_boost, rtg_cpu, start_t),
  614. TP_STRUCT__entry(
  615. __field(int, pid )
  616. __array(char, comm, TASK_COMM_LEN )
  617. __field(unsigned long, util )
  618. __field(int, prev_cpu )
  619. __field(int, next_cpu )
  620. __field(int, backup_cpu )
  621. __field(int, target_cpu )
  622. __field(bool, need_idle )
  623. __field(int, fastpath )
  624. __field(bool, placement_boost )
  625. __field(int, rtg_cpu )
  626. __field(u64, latency )
  627. ),
  628. TP_fast_assign(
  629. __entry->pid = p->pid;
  630. memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
  631. __entry->util = task_util(p);
  632. __entry->prev_cpu = task_cpu(p);
  633. __entry->next_cpu = next_cpu;
  634. __entry->backup_cpu = backup_cpu;
  635. __entry->target_cpu = target_cpu;
  636. __entry->need_idle = need_idle;
  637. __entry->fastpath = fastpath;
  638. __entry->placement_boost = placement_boost;
  639. __entry->rtg_cpu = rtg_cpu;
  640. __entry->latency = (sched_clock() - start_t);
  641. ),
  642. TP_printk("pid=%d comm=%s util=%lu prev_cpu=%d next_cpu=%d backup_cpu=%d target_cpu=%d need_idle=%d fastpath=%d placement_boost=%d rtg_cpu=%d latency=%llu",
  643. __entry->pid, __entry->comm, __entry->util, __entry->prev_cpu, __entry->next_cpu, __entry->backup_cpu, __entry->target_cpu, __entry->need_idle, __entry->fastpath, __entry->placement_boost, __entry->rtg_cpu, __entry->latency)
  644. );
  645. #endif
  646. /*
  647. * Tracepoint for waking up a task:
  648. */
  649. DECLARE_EVENT_CLASS(sched_wakeup_template,
  650. TP_PROTO(struct task_struct *p),
  651. TP_ARGS(__perf_task(p)),
  652. TP_STRUCT__entry(
  653. __array( char, comm, TASK_COMM_LEN )
  654. __field( pid_t, pid )
  655. __field( int, prio )
  656. __field( int, success )
  657. __field( int, target_cpu )
  658. ),
  659. TP_fast_assign(
  660. memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
  661. __entry->pid = p->pid;
  662. __entry->prio = p->prio;
  663. __entry->success = 1; /* rudiment, kill when possible */
  664. __entry->target_cpu = task_cpu(p);
  665. ),
  666. TP_printk("comm=%s pid=%d prio=%d target_cpu=%03d",
  667. __entry->comm, __entry->pid, __entry->prio,
  668. __entry->target_cpu)
  669. );
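/*
 * Usage sketch (assuming the core wakeup path, which lives outside this
 * file): the DEFINE_EVENTs below reuse this template unchanged, so each one
 * records the same fields; try_to_wake_up() emits trace_sched_waking(p) from
 * the waking context and trace_sched_wakeup(p) once the task becomes runnable.
 */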
  670. /*
  671. * Tracepoint called when waking a task; this tracepoint is guaranteed to be
  672. * called from the waking context.
  673. */
  674. DEFINE_EVENT(sched_wakeup_template, sched_waking,
  675. TP_PROTO(struct task_struct *p),
  676. TP_ARGS(p));
  677. /*
678. * Tracepoint called when the task is actually woken; p->state == TASK_RUNNING.
679. * It is not always called from the waking context.
  680. */
  681. DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
  682. TP_PROTO(struct task_struct *p),
  683. TP_ARGS(p));
  684. /*
  685. * Tracepoint for waking up a new task:
  686. */
  687. DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
  688. TP_PROTO(struct task_struct *p),
  689. TP_ARGS(p));
  690. #ifdef CREATE_TRACE_POINTS
  691. static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
  692. {
  693. #ifdef CONFIG_SCHED_DEBUG
  694. BUG_ON(p != current);
  695. #endif /* CONFIG_SCHED_DEBUG */
  696. /*
  697. * Preemption ignores task state, therefore preempted tasks are always
  698. * RUNNING (we will not have dequeued if state != RUNNING).
  699. */
  700. return preempt ? TASK_RUNNING | TASK_STATE_MAX : p->state;
  701. }
  702. #endif /* CREATE_TRACE_POINTS */
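/*
 * Worked example of the prev_state encoding used by sched_switch below: a
 * preempted task gets TASK_RUNNING | TASK_STATE_MAX and is printed as "R+",
 * while a task sleeping in TASK_UNINTERRUPTIBLE (2) is printed as "D" with
 * no "+" suffix.
 */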
  703. /*
  704. * Tracepoint for task switches, performed by the scheduler:
  705. */
  706. TRACE_EVENT(sched_switch,
  707. TP_PROTO(bool preempt,
  708. struct task_struct *prev,
  709. struct task_struct *next),
  710. TP_ARGS(preempt, prev, next),
  711. TP_STRUCT__entry(
  712. __array( char, prev_comm, TASK_COMM_LEN )
  713. __field( pid_t, prev_pid )
  714. __field( int, prev_prio )
  715. __field( long, prev_state )
  716. __array( char, next_comm, TASK_COMM_LEN )
  717. __field( pid_t, next_pid )
  718. __field( int, next_prio )
  719. ),
  720. TP_fast_assign(
  721. memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
  722. __entry->prev_pid = prev->pid;
  723. __entry->prev_prio = prev->prio;
  724. __entry->prev_state = __trace_sched_switch_state(preempt, prev);
  725. memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
  726. __entry->next_pid = next->pid;
  727. __entry->next_prio = next->prio;
  728. ),
  729. TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s%s ==> next_comm=%s next_pid=%d next_prio=%d",
  730. __entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
  731. __entry->prev_state & (TASK_STATE_MAX-1) ?
  732. __print_flags(__entry->prev_state & (TASK_STATE_MAX-1), "|",
  733. { 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },
  734. { 16, "Z" }, { 32, "X" }, { 64, "x" },
  735. { 128, "K" }, { 256, "W" }, { 512, "P" },
  736. { 1024, "N" }) : "R",
  737. __entry->prev_state & TASK_STATE_MAX ? "+" : "",
  738. __entry->next_comm, __entry->next_pid, __entry->next_prio)
  739. );
  740. /*
  741. * Tracepoint for a task being migrated:
  742. */
  743. TRACE_EVENT(sched_migrate_task,
  744. TP_PROTO(struct task_struct *p, int dest_cpu, unsigned int load),
  745. TP_ARGS(p, dest_cpu, load),
  746. TP_STRUCT__entry(
  747. __array( char, comm, TASK_COMM_LEN )
  748. __field( pid_t, pid )
  749. __field( int, prio )
  750. __field(unsigned int, load )
  751. __field( int, orig_cpu )
  752. __field( int, dest_cpu )
  753. ),
  754. TP_fast_assign(
  755. memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
  756. __entry->pid = p->pid;
  757. __entry->prio = p->prio;
  758. __entry->load = load;
  759. __entry->orig_cpu = task_cpu(p);
  760. __entry->dest_cpu = dest_cpu;
  761. ),
  762. TP_printk("comm=%s pid=%d prio=%d load=%d orig_cpu=%d dest_cpu=%d",
  763. __entry->comm, __entry->pid, __entry->prio, __entry->load,
  764. __entry->orig_cpu, __entry->dest_cpu)
  765. );
  766. /*
  767. * Tracepoint for a CPU going offline/online:
  768. */
  769. TRACE_EVENT(sched_cpu_hotplug,
  770. TP_PROTO(int affected_cpu, int error, int status),
  771. TP_ARGS(affected_cpu, error, status),
  772. TP_STRUCT__entry(
  773. __field( int, affected_cpu )
  774. __field( int, error )
  775. __field( int, status )
  776. ),
  777. TP_fast_assign(
  778. __entry->affected_cpu = affected_cpu;
  779. __entry->error = error;
  780. __entry->status = status;
  781. ),
  782. TP_printk("cpu %d %s error=%d", __entry->affected_cpu,
  783. __entry->status ? "online" : "offline", __entry->error)
  784. );
  785. /*
  786. * Tracepoint for load balancing:
  787. */
  788. #if NR_CPUS > 32
  789. #error "Unsupported NR_CPUS for lb tracepoint."
  790. #endif
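/*
 * The compile-time check above exists because group_mask below is recorded
 * as a single unsigned long bitmask of the balanced group's CPUs, so this
 * tracepoint cannot describe configurations with a larger NR_CPUS.
 */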
  791. TRACE_EVENT(sched_load_balance,
  792. TP_PROTO(int cpu, enum cpu_idle_type idle, int balance,
  793. unsigned long group_mask, int busiest_nr_running,
  794. unsigned long imbalance, unsigned int env_flags, int ld_moved,
  795. unsigned int balance_interval),
  796. TP_ARGS(cpu, idle, balance, group_mask, busiest_nr_running,
  797. imbalance, env_flags, ld_moved, balance_interval),
  798. TP_STRUCT__entry(
  799. __field( int, cpu)
  800. __field( enum cpu_idle_type, idle)
  801. __field( int, balance)
  802. __field( unsigned long, group_mask)
  803. __field( int, busiest_nr_running)
  804. __field( unsigned long, imbalance)
  805. __field( unsigned int, env_flags)
  806. __field( int, ld_moved)
  807. __field( unsigned int, balance_interval)
  808. ),
  809. TP_fast_assign(
  810. __entry->cpu = cpu;
  811. __entry->idle = idle;
  812. __entry->balance = balance;
  813. __entry->group_mask = group_mask;
  814. __entry->busiest_nr_running = busiest_nr_running;
  815. __entry->imbalance = imbalance;
  816. __entry->env_flags = env_flags;
  817. __entry->ld_moved = ld_moved;
  818. __entry->balance_interval = balance_interval;
  819. ),
  820. TP_printk("cpu=%d state=%s balance=%d group=%#lx busy_nr=%d imbalance=%ld flags=%#x ld_moved=%d bal_int=%d",
  821. __entry->cpu,
  822. __entry->idle == CPU_IDLE ? "idle" :
  823. (__entry->idle == CPU_NEWLY_IDLE ? "newly_idle" : "busy"),
  824. __entry->balance,
  825. __entry->group_mask, __entry->busiest_nr_running,
  826. __entry->imbalance, __entry->env_flags, __entry->ld_moved,
  827. __entry->balance_interval)
  828. );
  829. DECLARE_EVENT_CLASS(sched_process_template,
  830. TP_PROTO(struct task_struct *p),
  831. TP_ARGS(p),
  832. TP_STRUCT__entry(
  833. __array( char, comm, TASK_COMM_LEN )
  834. __field( pid_t, pid )
  835. __field( int, prio )
  836. ),
  837. TP_fast_assign(
  838. memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
  839. __entry->pid = p->pid;
  840. __entry->prio = p->prio;
  841. ),
  842. TP_printk("comm=%s pid=%d prio=%d",
  843. __entry->comm, __entry->pid, __entry->prio)
  844. );
  845. /*
  846. * Tracepoint for freeing a task:
  847. */
  848. DEFINE_EVENT(sched_process_template, sched_process_free,
  849. TP_PROTO(struct task_struct *p),
  850. TP_ARGS(p));
  851. /*
  852. * Tracepoint for a task exiting:
  853. */
  854. DEFINE_EVENT(sched_process_template, sched_process_exit,
  855. TP_PROTO(struct task_struct *p),
  856. TP_ARGS(p));
  857. /*
  858. * Tracepoint for waiting on task to unschedule:
  859. */
  860. DEFINE_EVENT(sched_process_template, sched_wait_task,
  861. TP_PROTO(struct task_struct *p),
  862. TP_ARGS(p));
  863. /*
  864. * Tracepoint for a waiting task:
  865. */
  866. TRACE_EVENT(sched_process_wait,
  867. TP_PROTO(struct pid *pid),
  868. TP_ARGS(pid),
  869. TP_STRUCT__entry(
  870. __array( char, comm, TASK_COMM_LEN )
  871. __field( pid_t, pid )
  872. __field( int, prio )
  873. ),
  874. TP_fast_assign(
  875. memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
  876. __entry->pid = pid_nr(pid);
  877. __entry->prio = current->prio;
  878. ),
  879. TP_printk("comm=%s pid=%d prio=%d",
  880. __entry->comm, __entry->pid, __entry->prio)
  881. );
  882. /*
  883. * Tracepoint for do_fork:
  884. */
  885. TRACE_EVENT(sched_process_fork,
  886. TP_PROTO(struct task_struct *parent, struct task_struct *child),
  887. TP_ARGS(parent, child),
  888. TP_STRUCT__entry(
  889. __array( char, parent_comm, TASK_COMM_LEN )
  890. __field( pid_t, parent_pid )
  891. __array( char, child_comm, TASK_COMM_LEN )
  892. __field( pid_t, child_pid )
  893. ),
  894. TP_fast_assign(
  895. memcpy(__entry->parent_comm, parent->comm, TASK_COMM_LEN);
  896. __entry->parent_pid = parent->pid;
  897. memcpy(__entry->child_comm, child->comm, TASK_COMM_LEN);
  898. __entry->child_pid = child->pid;
  899. ),
  900. TP_printk("comm=%s pid=%d child_comm=%s child_pid=%d",
  901. __entry->parent_comm, __entry->parent_pid,
  902. __entry->child_comm, __entry->child_pid)
  903. );
  904. /*
  905. * Tracepoint for exec:
  906. */
  907. TRACE_EVENT(sched_process_exec,
  908. TP_PROTO(struct task_struct *p, pid_t old_pid,
  909. struct linux_binprm *bprm),
  910. TP_ARGS(p, old_pid, bprm),
  911. TP_STRUCT__entry(
  912. __string( filename, bprm->filename )
  913. __field( pid_t, pid )
  914. __field( pid_t, old_pid )
  915. ),
  916. TP_fast_assign(
  917. __assign_str(filename, bprm->filename);
  918. __entry->pid = p->pid;
  919. __entry->old_pid = old_pid;
  920. ),
  921. TP_printk("filename=%s pid=%d old_pid=%d", __get_str(filename),
  922. __entry->pid, __entry->old_pid)
  923. );
  924. /*
925. * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE;
  926. * adding sched_stat support to SCHED_FIFO/RR would be welcome.
  927. */
  928. DECLARE_EVENT_CLASS(sched_stat_template,
  929. TP_PROTO(struct task_struct *tsk, u64 delay),
  930. TP_ARGS(__perf_task(tsk), __perf_count(delay)),
  931. TP_STRUCT__entry(
  932. __array( char, comm, TASK_COMM_LEN )
  933. __field( pid_t, pid )
  934. __field( u64, delay )
  935. ),
  936. TP_fast_assign(
  937. memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
  938. __entry->pid = tsk->pid;
  939. __entry->delay = delay;
  940. ),
  941. TP_printk("comm=%s pid=%d delay=%Lu [ns]",
  942. __entry->comm, __entry->pid,
  943. (unsigned long long)__entry->delay)
  944. );
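/*
 * Usage sketch (assuming the fair-class schedstat hooks, not shown here):
 * the DEFINE_EVENTs below share this template and differ only in what the
 * delay represents, e.g. trace_sched_stat_wait(tsk, delta) for runnable wait
 * time and trace_sched_stat_sleep(tsk, delta) for sleep time, with delta
 * given in nanoseconds as the "[ns]" suffix indicates.
 */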
  945. /*
  946. * Tracepoint for accounting wait time (time the task is runnable
  947. * but not actually running due to scheduler contention).
  948. */
  949. DEFINE_EVENT(sched_stat_template, sched_stat_wait,
  950. TP_PROTO(struct task_struct *tsk, u64 delay),
  951. TP_ARGS(tsk, delay));
  952. /*
  953. * Tracepoint for accounting sleep time (time the task is not runnable,
  954. * including iowait, see below).
  955. */
  956. DEFINE_EVENT(sched_stat_template, sched_stat_sleep,
  957. TP_PROTO(struct task_struct *tsk, u64 delay),
  958. TP_ARGS(tsk, delay));
  959. /*
  960. * Tracepoint for accounting iowait time (time the task is not runnable
  961. * due to waiting on IO to complete).
  962. */
  963. DEFINE_EVENT(sched_stat_template, sched_stat_iowait,
  964. TP_PROTO(struct task_struct *tsk, u64 delay),
  965. TP_ARGS(tsk, delay));
  966. /*
967. * Tracepoint for accounting blocked time (time the task is in uninterruptible sleep).
  968. */
  969. DEFINE_EVENT(sched_stat_template, sched_stat_blocked,
  970. TP_PROTO(struct task_struct *tsk, u64 delay),
  971. TP_ARGS(tsk, delay));
  972. /*
  973. * Tracepoint for recording the cause of uninterruptible sleep.
  974. */
  975. TRACE_EVENT(sched_blocked_reason,
  976. TP_PROTO(struct task_struct *tsk),
  977. TP_ARGS(tsk),
  978. TP_STRUCT__entry(
  979. __field( pid_t, pid )
  980. __field( void*, caller )
  981. __field( bool, io_wait )
  982. ),
  983. TP_fast_assign(
  984. __entry->pid = tsk->pid;
  985. __entry->caller = (void*)get_wchan(tsk);
  986. __entry->io_wait = tsk->in_iowait;
  987. ),
  988. TP_printk("pid=%d iowait=%d caller=%pS", __entry->pid, __entry->io_wait, __entry->caller)
  989. );
  990. /*
  991. * Tracepoint for accounting runtime (time the task is executing
  992. * on a CPU).
  993. */
  994. DECLARE_EVENT_CLASS(sched_stat_runtime,
  995. TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
  996. TP_ARGS(tsk, __perf_count(runtime), vruntime),
  997. TP_STRUCT__entry(
  998. __array( char, comm, TASK_COMM_LEN )
  999. __field( pid_t, pid )
  1000. __field( u64, runtime )
  1001. __field( u64, vruntime )
  1002. ),
  1003. TP_fast_assign(
  1004. memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
  1005. __entry->pid = tsk->pid;
  1006. __entry->runtime = runtime;
  1007. __entry->vruntime = vruntime;
  1008. ),
  1009. TP_printk("comm=%s pid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
  1010. __entry->comm, __entry->pid,
  1011. (unsigned long long)__entry->runtime,
  1012. (unsigned long long)__entry->vruntime)
  1013. );
  1014. DEFINE_EVENT(sched_stat_runtime, sched_stat_runtime,
  1015. TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
  1016. TP_ARGS(tsk, runtime, vruntime));
  1017. /*
1018. * Tracepoint for showing priority inheritance modifying a task's
  1019. * priority.
  1020. */
  1021. TRACE_EVENT(sched_pi_setprio,
  1022. TP_PROTO(struct task_struct *tsk, int newprio),
  1023. TP_ARGS(tsk, newprio),
  1024. TP_STRUCT__entry(
  1025. __array( char, comm, TASK_COMM_LEN )
  1026. __field( pid_t, pid )
  1027. __field( int, oldprio )
  1028. __field( int, newprio )
  1029. ),
  1030. TP_fast_assign(
  1031. memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
  1032. __entry->pid = tsk->pid;
  1033. __entry->oldprio = tsk->prio;
  1034. __entry->newprio = newprio;
  1035. ),
  1036. TP_printk("comm=%s pid=%d oldprio=%d newprio=%d",
  1037. __entry->comm, __entry->pid,
  1038. __entry->oldprio, __entry->newprio)
  1039. );
  1040. #ifdef CONFIG_DETECT_HUNG_TASK
  1041. TRACE_EVENT(sched_process_hang,
  1042. TP_PROTO(struct task_struct *tsk),
  1043. TP_ARGS(tsk),
  1044. TP_STRUCT__entry(
  1045. __array( char, comm, TASK_COMM_LEN )
  1046. __field( pid_t, pid )
  1047. ),
  1048. TP_fast_assign(
  1049. memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
  1050. __entry->pid = tsk->pid;
  1051. ),
  1052. TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
  1053. );
  1054. #endif /* CONFIG_DETECT_HUNG_TASK */
  1055. DECLARE_EVENT_CLASS(sched_move_task_template,
  1056. TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),
  1057. TP_ARGS(tsk, src_cpu, dst_cpu),
  1058. TP_STRUCT__entry(
  1059. __field( pid_t, pid )
  1060. __field( pid_t, tgid )
  1061. __field( pid_t, ngid )
  1062. __field( int, src_cpu )
  1063. __field( int, src_nid )
  1064. __field( int, dst_cpu )
  1065. __field( int, dst_nid )
  1066. ),
  1067. TP_fast_assign(
  1068. __entry->pid = task_pid_nr(tsk);
  1069. __entry->tgid = task_tgid_nr(tsk);
  1070. __entry->ngid = task_numa_group_id(tsk);
  1071. __entry->src_cpu = src_cpu;
  1072. __entry->src_nid = cpu_to_node(src_cpu);
  1073. __entry->dst_cpu = dst_cpu;
  1074. __entry->dst_nid = cpu_to_node(dst_cpu);
  1075. ),
  1076. TP_printk("pid=%d tgid=%d ngid=%d src_cpu=%d src_nid=%d dst_cpu=%d dst_nid=%d",
  1077. __entry->pid, __entry->tgid, __entry->ngid,
  1078. __entry->src_cpu, __entry->src_nid,
  1079. __entry->dst_cpu, __entry->dst_nid)
  1080. );
  1081. /*
  1082. * Tracks migration of tasks from one runqueue to another. Can be used to
1083. * detect if automatic NUMA balancing is bouncing between nodes.
  1084. */
  1085. DEFINE_EVENT(sched_move_task_template, sched_move_numa,
  1086. TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),
  1087. TP_ARGS(tsk, src_cpu, dst_cpu)
  1088. );
  1089. DEFINE_EVENT(sched_move_task_template, sched_stick_numa,
  1090. TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),
  1091. TP_ARGS(tsk, src_cpu, dst_cpu)
  1092. );
  1093. TRACE_EVENT(sched_swap_numa,
  1094. TP_PROTO(struct task_struct *src_tsk, int src_cpu,
  1095. struct task_struct *dst_tsk, int dst_cpu),
  1096. TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu),
  1097. TP_STRUCT__entry(
  1098. __field( pid_t, src_pid )
  1099. __field( pid_t, src_tgid )
  1100. __field( pid_t, src_ngid )
  1101. __field( int, src_cpu )
  1102. __field( int, src_nid )
  1103. __field( pid_t, dst_pid )
  1104. __field( pid_t, dst_tgid )
  1105. __field( pid_t, dst_ngid )
  1106. __field( int, dst_cpu )
  1107. __field( int, dst_nid )
  1108. ),
  1109. TP_fast_assign(
  1110. __entry->src_pid = task_pid_nr(src_tsk);
  1111. __entry->src_tgid = task_tgid_nr(src_tsk);
  1112. __entry->src_ngid = task_numa_group_id(src_tsk);
  1113. __entry->src_cpu = src_cpu;
  1114. __entry->src_nid = cpu_to_node(src_cpu);
  1115. __entry->dst_pid = task_pid_nr(dst_tsk);
  1116. __entry->dst_tgid = task_tgid_nr(dst_tsk);
  1117. __entry->dst_ngid = task_numa_group_id(dst_tsk);
  1118. __entry->dst_cpu = dst_cpu;
  1119. __entry->dst_nid = cpu_to_node(dst_cpu);
  1120. ),
  1121. TP_printk("src_pid=%d src_tgid=%d src_ngid=%d src_cpu=%d src_nid=%d dst_pid=%d dst_tgid=%d dst_ngid=%d dst_cpu=%d dst_nid=%d",
  1122. __entry->src_pid, __entry->src_tgid, __entry->src_ngid,
  1123. __entry->src_cpu, __entry->src_nid,
  1124. __entry->dst_pid, __entry->dst_tgid, __entry->dst_ngid,
  1125. __entry->dst_cpu, __entry->dst_nid)
  1126. );
  1127. /*
  1128. * Tracepoint for waking a polling cpu without an IPI.
  1129. */
  1130. TRACE_EVENT(sched_wake_idle_without_ipi,
  1131. TP_PROTO(int cpu),
  1132. TP_ARGS(cpu),
  1133. TP_STRUCT__entry(
  1134. __field( int, cpu )
  1135. ),
  1136. TP_fast_assign(
  1137. __entry->cpu = cpu;
  1138. ),
  1139. TP_printk("cpu=%d", __entry->cpu)
  1140. );
  1141. TRACE_EVENT(sched_contrib_scale_f,
  1142. TP_PROTO(int cpu, unsigned long freq_scale_factor,
  1143. unsigned long cpu_scale_factor),
  1144. TP_ARGS(cpu, freq_scale_factor, cpu_scale_factor),
  1145. TP_STRUCT__entry(
  1146. __field(int, cpu)
  1147. __field(unsigned long, freq_scale_factor)
  1148. __field(unsigned long, cpu_scale_factor)
  1149. ),
  1150. TP_fast_assign(
  1151. __entry->cpu = cpu;
  1152. __entry->freq_scale_factor = freq_scale_factor;
  1153. __entry->cpu_scale_factor = cpu_scale_factor;
  1154. ),
  1155. TP_printk("cpu=%d freq_scale_factor=%lu cpu_scale_factor=%lu",
  1156. __entry->cpu, __entry->freq_scale_factor,
  1157. __entry->cpu_scale_factor)
  1158. );
  1159. #ifdef CONFIG_SMP
  1160. #ifdef CONFIG_SCHED_WALT
  1161. extern unsigned int sysctl_sched_use_walt_cpu_util;
  1162. extern unsigned int sysctl_sched_use_walt_task_util;
  1163. extern unsigned int sched_ravg_window;
  1164. extern unsigned int walt_disabled;
  1165. #endif
  1166. /*
  1167. * Tracepoint for accounting sched averages for tasks.
  1168. */
  1169. TRACE_EVENT(sched_load_avg_task,
  1170. TP_PROTO(struct task_struct *tsk, struct sched_avg *avg, void *_ravg),
  1171. TP_ARGS(tsk, avg, _ravg),
  1172. TP_STRUCT__entry(
  1173. __array( char, comm, TASK_COMM_LEN )
  1174. __field( pid_t, pid )
  1175. __field( int, cpu )
  1176. __field( unsigned long, load_avg )
  1177. __field( unsigned long, util_avg )
  1178. __field( unsigned long, util_avg_pelt )
  1179. __field( u32, util_avg_walt )
  1180. __field( u64, load_sum )
  1181. __field( u32, util_sum )
  1182. __field( u32, period_contrib )
  1183. ),
  1184. TP_fast_assign(
  1185. memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
  1186. __entry->pid = tsk->pid;
  1187. __entry->cpu = task_cpu(tsk);
  1188. __entry->load_avg = avg->load_avg;
  1189. __entry->util_avg = avg->util_avg;
  1190. __entry->load_sum = avg->load_sum;
  1191. __entry->util_sum = avg->util_sum;
  1192. __entry->period_contrib = avg->period_contrib;
  1193. __entry->util_avg_pelt = avg->util_avg;
  1194. __entry->util_avg_walt = 0;
  1195. #ifdef CONFIG_SCHED_WALT
  1196. __entry->util_avg_walt = ((struct ravg*)_ravg)->demand /
  1197. (sched_ravg_window >> SCHED_CAPACITY_SHIFT);
  1198. if (!walt_disabled && sysctl_sched_use_walt_task_util)
  1199. __entry->util_avg = __entry->util_avg_walt;
  1200. #endif
  1201. ),
  1202. TP_printk("comm=%s pid=%d cpu=%d load_avg=%lu util_avg=%lu "
  1203. "util_avg_pelt=%lu util_avg_walt=%u load_sum=%llu"
  1204. " util_sum=%u period_contrib=%u",
  1205. __entry->comm,
  1206. __entry->pid,
  1207. __entry->cpu,
  1208. __entry->load_avg,
  1209. __entry->util_avg,
  1210. __entry->util_avg_pelt,
  1211. __entry->util_avg_walt,
  1212. (u64)__entry->load_sum,
  1213. (u32)__entry->util_sum,
  1214. (u32)__entry->period_contrib)
  1215. );
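/*
 * Worked example of the WALT conversion above (illustrative numbers, not
 * taken from this file): with a 20ms sched_ravg_window (20,000,000 ns) the
 * divisor is 20,000,000 >> SCHED_CAPACITY_SHIFT(10) ~= 19531, so a demand of
 * 10,000,000 ns yields util_avg_walt ~= 512, about half of the 1024 capacity
 * scale.
 */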
  1216. /*
  1217. * Tracepoint for accounting sched averages for cpus.
  1218. */
  1219. TRACE_EVENT(sched_load_avg_cpu,
  1220. TP_PROTO(int cpu, struct cfs_rq *cfs_rq),
  1221. TP_ARGS(cpu, cfs_rq),
  1222. TP_STRUCT__entry(
  1223. __field( int, cpu )
  1224. __field( unsigned long, load_avg )
  1225. __field( unsigned long, util_avg )
  1226. __field( unsigned long, util_avg_pelt )
  1227. __field( u32, util_avg_walt )
  1228. ),
  1229. TP_fast_assign(
  1230. __entry->cpu = cpu;
  1231. __entry->load_avg = cfs_rq->avg.load_avg;
  1232. __entry->util_avg = cfs_rq->avg.util_avg;
  1233. __entry->util_avg_pelt = cfs_rq->avg.util_avg;
  1234. __entry->util_avg_walt = 0;
  1235. #ifdef CONFIG_SCHED_WALT
  1236. __entry->util_avg_walt = div64_ul(cpu_rq(cpu)->prev_runnable_sum,
  1237. sched_ravg_window >> SCHED_CAPACITY_SHIFT);
  1238. if (!walt_disabled && sysctl_sched_use_walt_cpu_util)
  1239. __entry->util_avg = __entry->util_avg_walt;
  1240. #endif
  1241. ),
  1242. TP_printk("cpu=%d load_avg=%lu util_avg=%lu "
  1243. "util_avg_pelt=%lu util_avg_walt=%u",
  1244. __entry->cpu, __entry->load_avg, __entry->util_avg,
  1245. __entry->util_avg_pelt, __entry->util_avg_walt)
  1246. );
  1247. /*
  1248. * Tracepoint for sched_tune_config settings
  1249. */
  1250. TRACE_EVENT(sched_tune_config,
  1251. TP_PROTO(int boost),
  1252. TP_ARGS(boost),
  1253. TP_STRUCT__entry(
  1254. __field( int, boost )
  1255. ),
  1256. TP_fast_assign(
  1257. __entry->boost = boost;
  1258. ),
  1259. TP_printk("boost=%d ", __entry->boost)
  1260. );
  1261. /*
  1262. * Tracepoint for accounting CPU boosted utilization
  1263. */
  1264. TRACE_EVENT(sched_boost_cpu,
  1265. TP_PROTO(int cpu, unsigned long util, long margin),
  1266. TP_ARGS(cpu, util, margin),
  1267. TP_STRUCT__entry(
  1268. __field( int, cpu )
  1269. __field( unsigned long, util )
  1270. __field(long, margin )
  1271. ),
  1272. TP_fast_assign(
  1273. __entry->cpu = cpu;
  1274. __entry->util = util;
  1275. __entry->margin = margin;
  1276. ),
  1277. TP_printk("cpu=%d util=%lu margin=%ld",
  1278. __entry->cpu,
  1279. __entry->util,
  1280. __entry->margin)
  1281. );

/*
 * Tracepoint for schedtune_tasks_update
 */
TRACE_EVENT(sched_tune_tasks_update,
	TP_PROTO(struct task_struct *tsk, int cpu, int tasks, int idx,
		 int boost, int max_boost),
	TP_ARGS(tsk, cpu, tasks, idx, boost, max_boost),
	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
		__field( int, cpu )
		__field( int, tasks )
		__field( int, idx )
		__field( int, boost )
		__field( int, max_boost )
	),
	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid = tsk->pid;
		__entry->cpu = cpu;
		__entry->tasks = tasks;
		__entry->idx = idx;
		__entry->boost = boost;
		__entry->max_boost = max_boost;
	),
	TP_printk("pid=%d comm=%s "
		  "cpu=%d tasks=%d idx=%d boost=%d max_boost=%d",
		  __entry->pid, __entry->comm,
		  __entry->cpu, __entry->tasks, __entry->idx,
		  __entry->boost, __entry->max_boost)
);

/*
 * Tracepoint for schedtune_boostgroup_update
 */
TRACE_EVENT(sched_tune_boostgroup_update,
	TP_PROTO(int cpu, int variation, int max_boost),
	TP_ARGS(cpu, variation, max_boost),
	TP_STRUCT__entry(
		__field( int, cpu )
		__field( int, variation )
		__field( int, max_boost )
	),
	TP_fast_assign(
		__entry->cpu = cpu;
		__entry->variation = variation;
		__entry->max_boost = max_boost;
	),
	TP_printk("cpu=%d variation=%d max_boost=%d",
		  __entry->cpu, __entry->variation, __entry->max_boost)
);

/*
 * Tracepoint for accounting task boosted utilization
 */
TRACE_EVENT(sched_boost_task,
	TP_PROTO(struct task_struct *tsk, unsigned long util, long margin),
	TP_ARGS(tsk, util, margin),
	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
		__field( unsigned long, util )
		__field( long, margin )
	),
	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid = tsk->pid;
		__entry->util = util;
		__entry->margin = margin;
	),
	TP_printk("comm=%s pid=%d util=%lu margin=%ld",
		  __entry->comm, __entry->pid,
		  __entry->util,
		  __entry->margin)
);

/*
 * Tracepoint for find_best_target
 */
TRACE_EVENT(sched_find_best_target,
	TP_PROTO(struct task_struct *tsk, bool prefer_idle,
		 unsigned long min_util, int start_cpu,
		 int best_idle, int best_active, int target),
	TP_ARGS(tsk, prefer_idle, min_util, start_cpu,
		best_idle, best_active, target),
	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
		__field( unsigned long, min_util )
		__field( bool, prefer_idle )
		__field( int, start_cpu )
		__field( int, best_idle )
		__field( int, best_active )
		__field( int, target )
	),
	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid = tsk->pid;
		__entry->min_util = min_util;
		__entry->prefer_idle = prefer_idle;
		__entry->start_cpu = start_cpu;
		__entry->best_idle = best_idle;
		__entry->best_active = best_active;
		__entry->target = target;
	),
	TP_printk("pid=%d comm=%s prefer_idle=%d start_cpu=%d "
		  "best_idle=%d best_active=%d target=%d",
		  __entry->pid, __entry->comm,
		  __entry->prefer_idle, __entry->start_cpu,
		  __entry->best_idle, __entry->best_active,
		  __entry->target)
);
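
/*
 * Illustrative call site (not part of this header): the generated
 * trace_sched_find_best_target() helper would typically be emitted once per
 * wake-up placement decision, after the idle, active and final target CPU
 * candidates have been chosen.  Variable names below are assumptions for the
 * sketch, not code from this tree.
 *
 *	trace_sched_find_best_target(p, prefer_idle, min_util, start_cpu,
 *				     best_idle_cpu, best_active_cpu,
 *				     target_cpu);
 */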

TRACE_EVENT(sched_group_energy,
	TP_PROTO(int cpu, long group_util, u64 total_nrg,
		 int busy_nrg, int idle_nrg, int grp_idle_idx,
		 int new_capacity),
	TP_ARGS(cpu, group_util, total_nrg,
		busy_nrg, idle_nrg, grp_idle_idx,
		new_capacity),
	TP_STRUCT__entry(
		__field(int, cpu)
		__field(long, group_util)
		__field(u64, total_nrg)
		__field(int, busy_nrg)
		__field(int, idle_nrg)
		__field(int, grp_idle_idx)
		__field(int, new_capacity)
	),
	TP_fast_assign(
		__entry->cpu = cpu;
		__entry->group_util = group_util;
		__entry->total_nrg = total_nrg;
		__entry->busy_nrg = busy_nrg;
		__entry->idle_nrg = idle_nrg;
		__entry->grp_idle_idx = grp_idle_idx;
		__entry->new_capacity = new_capacity;
	),
	TP_printk("cpu=%d group_util=%ld total_nrg=%llu busy_nrg=%d idle_nrg=%d grp_idle_idx=%d new_capacity=%d",
		  __entry->cpu, __entry->group_util,
		  __entry->total_nrg, __entry->busy_nrg, __entry->idle_nrg,
		  __entry->grp_idle_idx, __entry->new_capacity)
);
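
/*
 * Illustrative call site (not part of this header): trace_sched_group_energy()
 * would typically be emitted from an energy-estimation pass once the busy and
 * idle energy contributions of a scheduling group have been accumulated.  All
 * variable names in the sketch are assumptions.
 *
 *	total_energy = sg_busy_energy + sg_idle_energy;
 *	trace_sched_group_energy(cpu, group_util, total_energy,
 *				 sg_busy_energy, sg_idle_energy,
 *				 idle_idx, group_max_capacity);
 */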

/*
 * Tracepoint for the schedtune energy/capacity payoff filter
 */
TRACE_EVENT(sched_tune_filter,
	TP_PROTO(int nrg_delta, int cap_delta,
		 int nrg_gain, int cap_gain,
		 int payoff, int region),
	TP_ARGS(nrg_delta, cap_delta, nrg_gain, cap_gain, payoff, region),
	TP_STRUCT__entry(
		__field( int, nrg_delta )
		__field( int, cap_delta )
		__field( int, nrg_gain )
		__field( int, cap_gain )
		__field( int, payoff )
		__field( int, region )
	),
	TP_fast_assign(
		__entry->nrg_delta = nrg_delta;
		__entry->cap_delta = cap_delta;
		__entry->nrg_gain = nrg_gain;
		__entry->cap_gain = cap_gain;
		__entry->payoff = payoff;
		__entry->region = region;
	),
	TP_printk("nrg_delta=%d cap_delta=%d nrg_gain=%d cap_gain=%d payoff=%d region=%d",
		  __entry->nrg_delta, __entry->cap_delta,
		  __entry->nrg_gain, __entry->cap_gain,
		  __entry->payoff, __entry->region)
);

/*
 * Tracepoint for system overutilized flag
 */
TRACE_EVENT(sched_overutilized,
	TP_PROTO(bool overutilized),
	TP_ARGS(overutilized),
	TP_STRUCT__entry(
		__field( bool, overutilized )
	),
	TP_fast_assign(
		__entry->overutilized = overutilized;
	),
	TP_printk("overutilized=%d",
		  __entry->overutilized ? 1 : 0)
);
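
/*
 * Illustrative call site (not part of this header): the flag is a single
 * boolean, so trace_sched_overutilized() is typically emitted only when the
 * state changes.  The guard below is a hypothetical sketch.
 *
 *	if (!rq->rd->overutilized && cpu_overutilized(rq->cpu)) {
 *		rq->rd->overutilized = true;
 *		trace_sched_overutilized(true);
 *	}
 */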

#endif

TRACE_EVENT(sched_get_nr_running_avg,
	TP_PROTO(int avg, int big_avg, int iowait_avg,
		 unsigned int max_nr, unsigned int big_max_nr),
	TP_ARGS(avg, big_avg, iowait_avg, max_nr, big_max_nr),
	TP_STRUCT__entry(
		__field( int, avg )
		__field( int, big_avg )
		__field( int, iowait_avg )
		__field( unsigned int, max_nr )
		__field( unsigned int, big_max_nr )
	),
	TP_fast_assign(
		__entry->avg = avg;
		__entry->big_avg = big_avg;
		__entry->iowait_avg = iowait_avg;
		__entry->max_nr = max_nr;
		__entry->big_max_nr = big_max_nr;
	),
	TP_printk("avg=%d big_avg=%d iowait_avg=%d max_nr=%u big_max_nr=%u",
		  __entry->avg, __entry->big_avg, __entry->iowait_avg,
		  __entry->max_nr, __entry->big_max_nr)
);
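
/*
 * Illustrative call site (not part of this header): a helper that computes
 * windowed run-queue averages would typically report its results through this
 * tracepoint just before handing them back to the caller.  Variable names are
 * assumptions for the sketch.
 *
 *	trace_sched_get_nr_running_avg(avg, big_avg, iowait_avg,
 *				       max_nr, big_max_nr);
 */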

TRACE_EVENT(core_ctl_eval_need,
	TP_PROTO(unsigned int cpu, unsigned int old_need,
		 unsigned int new_need, unsigned int updated),
	TP_ARGS(cpu, old_need, new_need, updated),
	TP_STRUCT__entry(
		__field(u32, cpu)
		__field(u32, old_need)
		__field(u32, new_need)
		__field(u32, updated)
	),
	TP_fast_assign(
		__entry->cpu = cpu;
		__entry->old_need = old_need;
		__entry->new_need = new_need;
		__entry->updated = updated;
	),
	TP_printk("cpu=%u, old_need=%u, new_need=%u, updated=%u", __entry->cpu,
		  __entry->old_need, __entry->new_need, __entry->updated)
);

TRACE_EVENT(core_ctl_set_busy,
	TP_PROTO(unsigned int cpu, unsigned int busy,
		 unsigned int old_is_busy, unsigned int is_busy),
	TP_ARGS(cpu, busy, old_is_busy, is_busy),
	TP_STRUCT__entry(
		__field(u32, cpu)
		__field(u32, busy)
		__field(u32, old_is_busy)
		__field(u32, is_busy)
		__field(bool, high_irqload)
	),
	TP_fast_assign(
		__entry->cpu = cpu;
		__entry->busy = busy;
		__entry->old_is_busy = old_is_busy;
		__entry->is_busy = is_busy;
		__entry->high_irqload = sched_cpu_high_irqload(cpu);
	),
	TP_printk("cpu=%u, busy=%u, old_is_busy=%u, new_is_busy=%u high_irqload=%d",
		  __entry->cpu, __entry->busy, __entry->old_is_busy,
		  __entry->is_busy, __entry->high_irqload)
);

TRACE_EVENT(core_ctl_set_boost,
	TP_PROTO(u32 refcount, s32 ret),
	TP_ARGS(refcount, ret),
	TP_STRUCT__entry(
		__field(u32, refcount)
		__field(s32, ret)
	),
	TP_fast_assign(
		__entry->refcount = refcount;
		__entry->ret = ret;
	),
	TP_printk("refcount=%u, ret=%d", __entry->refcount, __entry->ret)
);
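
/*
 * Illustrative call site (not part of this header): core_ctl boost is
 * refcounted, so trace_core_ctl_set_boost() is typically emitted with the
 * updated refcount and the return code of the set/unset request.  The
 * surrounding names are assumptions for the sketch.
 *
 *	ret = update_boost_refcount(boost);
 *	trace_core_ctl_set_boost(boost_refcount, ret);
 */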

/*
 * sched_isolate - called when cores are isolated/unisolated
 *
 * @requested_cpu: cpu requested to be isolated/unisolated
 * @isolated_cpus: bitmask of cpus currently isolated
 * @start_time: sched_clock() timestamp taken when the operation started,
 *		used to report the elapsed time in us
 * @isolate: 1 if isolating, 0 if unisolating
 */
TRACE_EVENT(sched_isolate,
	TP_PROTO(unsigned int requested_cpu, unsigned int isolated_cpus,
		 u64 start_time, unsigned char isolate),
	TP_ARGS(requested_cpu, isolated_cpus, start_time, isolate),
	TP_STRUCT__entry(
		__field(u32, requested_cpu)
		__field(u32, isolated_cpus)
		__field(u32, time)
		__field(unsigned char, isolate)
	),
	TP_fast_assign(
		__entry->requested_cpu = requested_cpu;
		__entry->isolated_cpus = isolated_cpus;
		__entry->time = div64_u64(sched_clock() - start_time, 1000);
		__entry->isolate = isolate;
	),
	TP_printk("iso cpu=%u cpus=0x%x time=%u us isolated=%d",
		  __entry->requested_cpu, __entry->isolated_cpus,
		  __entry->time, __entry->isolate)
);
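
/*
 * Illustrative call site (not part of this header): the caller samples
 * sched_clock() before starting the isolation work and passes that value as
 * @start_time; the tracepoint itself converts the elapsed time to
 * microseconds.  Names in the sketch are assumptions.
 *
 *	u64 start_time = sched_clock();
 *
 *	do_isolation_work(cpu);
 *	trace_sched_isolate(cpu, cpumask_bits(cpu_isolated_mask)[0],
 *			    start_time, 1);
 */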

TRACE_EVENT(sched_preempt_disable,
	TP_PROTO(u64 delta, bool irqs_disabled,
		 unsigned long caddr0, unsigned long caddr1,
		 unsigned long caddr2, unsigned long caddr3),
	TP_ARGS(delta, irqs_disabled, caddr0, caddr1, caddr2, caddr3),
	TP_STRUCT__entry(
		__field(u64, delta)
		__field(bool, irqs_disabled)
		__field(void*, caddr0)
		__field(void*, caddr1)
		__field(void*, caddr2)
		__field(void*, caddr3)
	),
	TP_fast_assign(
		__entry->delta = delta;
		__entry->irqs_disabled = irqs_disabled;
		__entry->caddr0 = (void *)caddr0;
		__entry->caddr1 = (void *)caddr1;
		__entry->caddr2 = (void *)caddr2;
		__entry->caddr3 = (void *)caddr3;
	),
	TP_printk("delta=%llu(ns) irqs_d=%d Callers:(%pf<-%pf<-%pf<-%pf)",
		  __entry->delta, __entry->irqs_disabled,
		  __entry->caddr0, __entry->caddr1,
		  __entry->caddr2, __entry->caddr3)
);
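
/*
 * Illustrative call site (not part of this header): this event is meant to
 * flag long preempt-disabled sections, so a typical caller measures the
 * duration when preemption is re-enabled and only emits the trace beyond a
 * threshold.  CALLER_ADDR0..CALLER_ADDR3 are the standard ftrace caller
 * helpers; the timestamp and threshold names are assumptions.
 *
 *	delta = sched_clock() - preempt_disable_ts;
 *	if (delta > preemptoff_threshold_ns)
 *		trace_sched_preempt_disable(delta, irqs_disabled(),
 *					    CALLER_ADDR0, CALLER_ADDR1,
 *					    CALLER_ADDR2, CALLER_ADDR3);
 */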

#endif /* _TRACE_SCHED_H */

/* This part must be outside protection */
#include <trace/define_trace.h>