kthread.c
// SPDX-License-Identifier: GPL-2.0-only
/* Kernel thread helper functions.
 * Copyright (C) 2004 IBM Corporation, Rusty Russell.
 * Copyright (C) 2009 Red Hat, Inc.
 *
 * Creation is done via kthreadd, so that we get a clean environment
 * even if we're invoked from userspace (think modprobe, hotplug cpu,
 * etc.).
 */
#include <uapi/linux/sched/types.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/cgroup.h>
#include <linux/cpuset.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <linux/numa.h>
#include <linux/sched/isolation.h>
#include <trace/events/sched.h>

static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;

struct kthread_create_info
{
	/* Information passed to kthread() from kthreadd. */
	char *full_name;
	int (*threadfn)(void *data);
	void *data;
	int node;

	/* Result passed back to kthread_create() from kthreadd. */
	struct task_struct *result;
	struct completion *done;

	struct list_head list;
};

struct kthread {
	unsigned long flags;
	unsigned int cpu;
	int result;
	int (*threadfn)(void *);
	void *data;
	struct completion parked;
	struct completion exited;
#ifdef CONFIG_BLK_CGROUP
	struct cgroup_subsys_state *blkcg_css;
#endif
	/* To store the full name if task comm is truncated. */
	char *full_name;
};

enum KTHREAD_BITS {
	KTHREAD_IS_PER_CPU = 0,
	KTHREAD_SHOULD_STOP,
	KTHREAD_SHOULD_PARK,
};

static inline struct kthread *to_kthread(struct task_struct *k)
{
	WARN_ON(!(k->flags & PF_KTHREAD));
	return k->worker_private;
}

/*
 * Variant of to_kthread() that doesn't assume @p is a kthread.
 *
 * Per construction; when:
 *
 *   (p->flags & PF_KTHREAD) && p->worker_private
 *
 * the task is both a kthread and struct kthread is persistent. However
 * PF_KTHREAD on its own is not, kernel_thread() can exec() (See umh.c and
 * begin_new_exec()).
 */
static inline struct kthread *__to_kthread(struct task_struct *p)
{
	void *kthread = p->worker_private;

	if (kthread && !(p->flags & PF_KTHREAD))
		kthread = NULL;
	return kthread;
}

void get_kthread_comm(char *buf, size_t buf_size, struct task_struct *tsk)
{
	struct kthread *kthread = to_kthread(tsk);

	if (!kthread || !kthread->full_name) {
		__get_task_comm(buf, buf_size, tsk);
		return;
	}
	strscpy_pad(buf, kthread->full_name, buf_size);
}

bool set_kthread_struct(struct task_struct *p)
{
	struct kthread *kthread;

	if (WARN_ON_ONCE(to_kthread(p)))
		return false;

	kthread = kzalloc(sizeof(*kthread), GFP_KERNEL);
	if (!kthread)
		return false;

	init_completion(&kthread->exited);
	init_completion(&kthread->parked);
	p->vfork_done = &kthread->exited;
	p->worker_private = kthread;
	return true;
}

void free_kthread_struct(struct task_struct *k)
{
	struct kthread *kthread;

	/*
	 * Can be NULL if kmalloc() in set_kthread_struct() failed.
	 */
	kthread = to_kthread(k);
	if (!kthread)
		return;

#ifdef CONFIG_BLK_CGROUP
	WARN_ON_ONCE(kthread->blkcg_css);
#endif
	k->worker_private = NULL;
	kfree(kthread->full_name);
	kfree(kthread);
}

/**
 * kthread_should_stop - should this kthread return now?
 *
 * When someone calls kthread_stop() on your kthread, it will be woken
 * and this will return true. You should then return, and your return
 * value will be passed through to kthread_stop().
 */
bool kthread_should_stop(void)
{
	return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
}
EXPORT_SYMBOL(kthread_should_stop);

static bool __kthread_should_park(struct task_struct *k)
{
	return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(k)->flags);
}

/**
 * kthread_should_park - should this kthread park now?
 *
 * When someone calls kthread_park() on your kthread, it will be woken
 * and this will return true. You should then do the necessary
 * cleanup and call kthread_parkme().
 *
 * Similar to kthread_should_stop(), but this keeps the thread alive
 * and in a park position. kthread_unpark() "restarts" the thread and
 * calls the thread function again.
 */
bool kthread_should_park(void)
{
	return __kthread_should_park(current);
}
EXPORT_SYMBOL_GPL(kthread_should_park);
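
/*
 * Usage sketch (illustrative only): a typical thread function that
 * cooperates with kthread_should_stop()/kthread_should_park(). The names
 * example_threadfn() and example_do_work() are hypothetical.
 *
 *	static int example_threadfn(void *data)
 *	{
 *		while (!kthread_should_stop()) {
 *			if (kthread_should_park())
 *				kthread_parkme();
 *			else
 *				example_do_work(data);
 *		}
 *		return 0;	// passed back to kthread_stop()
 *	}
 */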
bool kthread_should_stop_or_park(void)
{
	struct kthread *kthread = __to_kthread(current);

	if (!kthread)
		return false;

	return kthread->flags & (BIT(KTHREAD_SHOULD_STOP) | BIT(KTHREAD_SHOULD_PARK));
}

/**
 * kthread_freezable_should_stop - should this freezable kthread return now?
 * @was_frozen: optional out parameter, indicates whether %current was frozen
 *
 * kthread_should_stop() for freezable kthreads, which will enter
 * refrigerator if necessary. This function is safe from kthread_stop() /
 * freezer deadlock and freezable kthreads should use this function instead
 * of calling try_to_freeze() directly.
 */
bool kthread_freezable_should_stop(bool *was_frozen)
{
	bool frozen = false;

	might_sleep();

	if (unlikely(freezing(current)))
		frozen = __refrigerator(true);

	if (was_frozen)
		*was_frozen = frozen;

	return kthread_should_stop();
}
EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);

/**
 * kthread_func - return the function specified on kthread creation
 * @task: kthread task in question
 *
 * Returns NULL if the task is not a kthread.
 */
void *kthread_func(struct task_struct *task)
{
	struct kthread *kthread = __to_kthread(task);

	if (kthread)
		return kthread->threadfn;
	return NULL;
}
EXPORT_SYMBOL_GPL(kthread_func);

/**
 * kthread_data - return data value specified on kthread creation
 * @task: kthread task in question
 *
 * Return the data value specified when kthread @task was created.
 * The caller is responsible for ensuring the validity of @task when
 * calling this function.
 */
void *kthread_data(struct task_struct *task)
{
	return to_kthread(task)->data;
}
EXPORT_SYMBOL_GPL(kthread_data);

/**
 * kthread_probe_data - speculative version of kthread_data()
 * @task: possible kthread task in question
 *
 * @task could be a kthread task. Return the data value specified when it
 * was created if accessible. If @task isn't a kthread task or its data is
 * inaccessible for any reason, %NULL is returned. This function requires
 * that @task itself is safe to dereference.
 */
void *kthread_probe_data(struct task_struct *task)
{
	struct kthread *kthread = __to_kthread(task);
	void *data = NULL;

	if (kthread)
		copy_from_kernel_nofault(&data, &kthread->data, sizeof(data));
	return data;
}
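
/*
 * Usage sketch (illustrative only): the @data handed to kthread_create()
 * can later be recovered from the task with kthread_data(), e.g. by code
 * that only holds the task_struct. example_ctx and example_tsk are
 * hypothetical names.
 *
 *	struct example_ctx *ctx = kthread_data(example_tsk);
 */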
static void __kthread_parkme(struct kthread *self)
{
	for (;;) {
		/*
		 * TASK_PARKED is a special state; we must serialize against
		 * possible pending wakeups to avoid store-store collisions on
		 * task->state.
		 *
		 * Such a collision might possibly result in the task state
		 * changing from TASK_PARKED and us failing the
		 * wait_task_inactive() in kthread_park().
		 */
		set_special_state(TASK_PARKED);
		if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
			break;

		/*
		 * Thread is going to call schedule(), do not preempt it,
		 * or the caller of kthread_park() may spend more time in
		 * wait_task_inactive().
		 */
		preempt_disable();
		complete(&self->parked);
		schedule_preempt_disabled();
		preempt_enable();
	}
	__set_current_state(TASK_RUNNING);
}

void kthread_parkme(void)
{
	__kthread_parkme(to_kthread(current));
}
EXPORT_SYMBOL_GPL(kthread_parkme);

/**
 * kthread_exit - Cause the current kthread to return @result to kthread_stop().
 * @result: The integer value to return to kthread_stop().
 *
 * While kthread_exit can be called directly, it exists so that
 * functions which do some additional work in non-modular code such as
 * module_put_and_kthread_exit can be implemented.
 *
 * Does not return.
 */
void __noreturn kthread_exit(long result)
{
	struct kthread *kthread = to_kthread(current);
	kthread->result = result;
	do_exit(0);
}
EXPORT_SYMBOL(kthread_exit);

/**
 * kthread_complete_and_exit - Exit the current kthread.
 * @comp: Completion to complete
 * @code: The integer value to return to kthread_stop().
 *
 * If present, complete @comp and then return @code to kthread_stop().
 *
 * A kernel thread whose module may be removed after the completion of
 * @comp can use this function to exit safely.
 *
 * Does not return.
 */
void __noreturn kthread_complete_and_exit(struct completion *comp, long code)
{
	if (comp)
		complete(comp);

	kthread_exit(code);
}
EXPORT_SYMBOL(kthread_complete_and_exit);

static int kthread(void *_create)
{
	static const struct sched_param param = { .sched_priority = 0 };
	/* Copy data: it's on kthread's stack */
	struct kthread_create_info *create = _create;
	int (*threadfn)(void *data) = create->threadfn;
	void *data = create->data;
	struct completion *done;
	struct kthread *self;
	int ret;

	self = to_kthread(current);

	/* Release the structure when caller killed by a fatal signal. */
	done = xchg(&create->done, NULL);
	if (!done) {
		kfree(create->full_name);
		kfree(create);
		kthread_exit(-EINTR);
	}

	self->full_name = create->full_name;
	self->threadfn = threadfn;
	self->data = data;

	/*
	 * The new thread inherited kthreadd's priority and CPU mask. Reset
	 * back to default in case they have been changed.
	 */
	sched_setscheduler_nocheck(current, SCHED_NORMAL, &param);
	set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_KTHREAD));

	/* OK, tell user we're spawned, wait for stop or wakeup */
	__set_current_state(TASK_UNINTERRUPTIBLE);
	create->result = current;
	/*
	 * Thread is going to call schedule(), do not preempt it,
	 * or the creator may spend more time in wait_task_inactive().
	 */
	preempt_disable();
	complete(done);
	schedule_preempt_disabled();
	preempt_enable();

	ret = -EINTR;
	if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
		cgroup_kthread_ready();
		__kthread_parkme(self);
		ret = threadfn(data);
	}
	kthread_exit(ret);
}

/* called from kernel_clone() to get node information for about to be created task */
int tsk_fork_get_node(struct task_struct *tsk)
{
#ifdef CONFIG_NUMA
	if (tsk == kthreadd_task)
		return tsk->pref_node_fork;
#endif
	return NUMA_NO_NODE;
}

static void create_kthread(struct kthread_create_info *create)
{
	int pid;

#ifdef CONFIG_NUMA
	current->pref_node_fork = create->node;
#endif
	/* We want our own signal handler (we take no signals by default). */
	pid = kernel_thread(kthread, create, create->full_name,
			    CLONE_FS | CLONE_FILES | SIGCHLD);
	if (pid < 0) {
		/* Release the structure when caller killed by a fatal signal. */
		struct completion *done = xchg(&create->done, NULL);

		kfree(create->full_name);
		if (!done) {
			kfree(create);
			return;
		}
		create->result = ERR_PTR(pid);
		complete(done);
	}
}

static __printf(4, 0)
struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
					     void *data, int node,
					     const char namefmt[],
					     va_list args)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct task_struct *task;
	struct kthread_create_info *create = kmalloc(sizeof(*create),
						     GFP_KERNEL);

	if (!create)
		return ERR_PTR(-ENOMEM);
	create->threadfn = threadfn;
	create->data = data;
	create->node = node;
	create->done = &done;
	create->full_name = kvasprintf(GFP_KERNEL, namefmt, args);
	if (!create->full_name) {
		task = ERR_PTR(-ENOMEM);
		goto free_create;
	}

	spin_lock(&kthread_create_lock);
	list_add_tail(&create->list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	/*
	 * Wait for completion in killable state, for I might be chosen by
	 * the OOM killer while kthreadd is trying to allocate memory for
	 * new kernel thread.
	 */
	if (unlikely(wait_for_completion_killable(&done))) {
		/*
		 * If I was killed by a fatal signal before kthreadd (or new
		 * kernel thread) calls complete(), leave the cleanup of this
		 * structure to that thread.
		 */
		if (xchg(&create->done, NULL))
			return ERR_PTR(-EINTR);
		/*
		 * kthreadd (or new kernel thread) will call complete()
		 * shortly.
		 */
		wait_for_completion(&done);
	}
	task = create->result;
free_create:
	kfree(create);
	return task;
}

/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: task and thread structures for the thread are allocated on this node
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread. The thread will be stopped: use wake_up_process() to start
 * it. See also kthread_run(). The new thread has SCHED_NORMAL policy and
 * is affine to all CPUs.
 *
 * If thread is going to be bound on a particular cpu, give its node
 * in @node, to get NUMA affinity for kthread stack, or else give NUMA_NO_NODE.
 * When woken, the thread will run @threadfn() with @data as its
 * argument. @threadfn() can either return directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called). The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					   void *data, int node,
					   const char namefmt[],
					   ...)
{
	struct task_struct *task;
	va_list args;

	va_start(args, namefmt);
	task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
	va_end(args);

	return task;
}
EXPORT_SYMBOL(kthread_create_on_node);
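
/*
 * Usage sketch (illustrative only): creating and starting a kthread with
 * the helpers above. kthread_create() and kthread_run() are the wrappers
 * from include/linux/kthread.h; example_threadfn, ctx and id are
 * hypothetical.
 *
 *	struct task_struct *tsk;
 *
 *	tsk = kthread_create(example_threadfn, ctx, "example/%d", id);
 *	if (IS_ERR(tsk))
 *		return PTR_ERR(tsk);
 *	wake_up_process(tsk);
 *
 *	// or, create and wake in one step:
 *	tsk = kthread_run(example_threadfn, ctx, "example/%d", id);
 */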
static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, unsigned int state)
{
	unsigned long flags;

	if (!wait_task_inactive(p, state)) {
		WARN_ON(1);
		return;
	}

	/* It's safe because the task is inactive. */
	raw_spin_lock_irqsave(&p->pi_lock, flags);
	do_set_cpus_allowed(p, mask);
	p->flags |= PF_NO_SETAFFINITY;
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}

static void __kthread_bind(struct task_struct *p, unsigned int cpu, unsigned int state)
{
	__kthread_bind_mask(p, cpumask_of(cpu), state);
}

void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
{
	__kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
}

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @k to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(kthread_bind);

/**
 * kthread_create_on_cpu - Create a cpu bound kthread
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @cpu: The cpu on which the thread should be bound.
 * @namefmt: printf-style name for the thread. Format is restricted
 *	     to "name.*%u". Code fills in cpu number.
 *
 * Description: This helper function creates and names a kernel thread.
 */
struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
					  void *data, unsigned int cpu,
					  const char *namefmt)
{
	struct task_struct *p;

	p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
				   cpu);
	if (IS_ERR(p))
		return p;
	kthread_bind(p, cpu);
	/* CPU hotplug needs to bind once again when unparking the thread. */
	to_kthread(p)->cpu = cpu;
	return p;
}
EXPORT_SYMBOL(kthread_create_on_cpu);
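
/*
 * Usage sketch (illustrative only): a CPU-bound kthread.
 * kthread_create_on_cpu() binds the new thread and records the CPU for
 * re-binding on unpark; an unbound thread can instead be pinned with
 * kthread_bind(). example_threadfn is hypothetical.
 *
 *	struct task_struct *tsk;
 *
 *	tsk = kthread_create_on_cpu(example_threadfn, NULL, cpu, "example/%u");
 *	if (!IS_ERR(tsk))
 *		wake_up_process(tsk);
 */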
void kthread_set_per_cpu(struct task_struct *k, int cpu)
{
	struct kthread *kthread = to_kthread(k);
	if (!kthread)
		return;

	WARN_ON_ONCE(!(k->flags & PF_NO_SETAFFINITY));

	if (cpu < 0) {
		clear_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
		return;
	}

	kthread->cpu = cpu;
	set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
}

bool kthread_is_per_cpu(struct task_struct *p)
{
	struct kthread *kthread = __to_kthread(p);
	if (!kthread)
		return false;

	return test_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
}

/**
 * kthread_unpark - unpark a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return false, wakes it, and
 * waits for it to return. If the thread is marked percpu then it is
 * bound to the cpu again.
 */
void kthread_unpark(struct task_struct *k)
{
	struct kthread *kthread = to_kthread(k);

	if (!test_bit(KTHREAD_SHOULD_PARK, &kthread->flags))
		return;
	/*
	 * Newly created kthread was parked when the CPU was offline.
	 * The binding was lost and we need to set it again.
	 */
	if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
		__kthread_bind(k, kthread->cpu, TASK_PARKED);

	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
	/*
	 * __kthread_parkme() will either see !SHOULD_PARK or get the wakeup.
	 */
	wake_up_state(k, TASK_PARKED);
}
EXPORT_SYMBOL_GPL(kthread_unpark);

/**
 * kthread_park - park a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return true, wakes it, and
 * waits for it to return. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will park without
 * calling threadfn().
 *
 * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
 * If called by the kthread itself just the park bit is set.
 */
int kthread_park(struct task_struct *k)
{
	struct kthread *kthread = to_kthread(k);

	if (WARN_ON(k->flags & PF_EXITING))
		return -ENOSYS;

	if (WARN_ON_ONCE(test_bit(KTHREAD_SHOULD_PARK, &kthread->flags)))
		return -EBUSY;

	set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
	if (k != current) {
		wake_up_process(k);
		/*
		 * Wait for __kthread_parkme() to complete(), this means we
		 * _will_ have TASK_PARKED and are about to call schedule().
		 */
		wait_for_completion(&kthread->parked);
		/*
		 * Now wait for that schedule() to complete and the task to
		 * get scheduled out.
		 */
		WARN_ON_ONCE(!wait_task_inactive(k, TASK_PARKED));
	}

	return 0;
}
EXPORT_SYMBOL_GPL(kthread_park);
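
/*
 * Usage sketch (illustrative only) of the park/unpark protocol, e.g.
 * around CPU hotplug. The thread function must call kthread_parkme()
 * when kthread_should_park() becomes true (see the loop sketched after
 * kthread_should_park()); tsk is a hypothetical task pointer.
 *
 *	if (kthread_park(tsk))		// 0 once the thread is parked
 *		pr_warn("could not park example thread\n");
 *	...				// thread is guaranteed off-CPU here
 *	kthread_unpark(tsk);		// resumes the thread function
 */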
/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call kthread_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread;
	int ret;

	trace_sched_kthread_stop(k);

	get_task_struct(k);
	kthread = to_kthread(k);
	set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
	kthread_unpark(k);
	set_tsk_thread_flag(k, TIF_NOTIFY_SIGNAL);
	wake_up_process(k);
	wait_for_completion(&kthread->exited);
	ret = kthread->result;
	put_task_struct(k);

	trace_sched_kthread_stop_ret(ret);
	return ret;
}
EXPORT_SYMBOL(kthread_stop);

/**
 * kthread_stop_put - stop a thread and put its task struct
 * @k: thread created by kthread_create().
 *
 * Stops a thread created by kthread_create() and puts its task_struct.
 * Only use when holding an extra task struct reference obtained by
 * calling get_task_struct().
 */
int kthread_stop_put(struct task_struct *k)
{
	int ret;

	ret = kthread_stop(k);
	put_task_struct(k);
	return ret;
}
EXPORT_SYMBOL(kthread_stop_put);
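
/*
 * Usage sketch (illustrative only): stopping a thread whose task_struct
 * reference was taken with get_task_struct(), e.g. because threadfn may
 * call kthread_exit() on its own. tsk is hypothetical.
 *
 *	get_task_struct(tsk);
 *	...
 *	ret = kthread_stop_put(tsk);	// stops the thread and drops the ref
 */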
int kthreadd(void *unused)
{
	struct task_struct *tsk = current;

	/* Setup a clean context for our children to inherit. */
	set_task_comm(tsk, "kthreadd");
	ignore_signals(tsk);
	set_cpus_allowed_ptr(tsk, housekeeping_cpumask(HK_TYPE_KTHREAD));
	set_mems_allowed(node_states[N_MEMORY]);

	current->flags |= PF_NOFREEZE;
	cgroup_init_kthreadd();

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&kthread_create_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		spin_lock(&kthread_create_lock);
		while (!list_empty(&kthread_create_list)) {
			struct kthread_create_info *create;

			create = list_entry(kthread_create_list.next,
					    struct kthread_create_info, list);
			list_del_init(&create->list);
			spin_unlock(&kthread_create_lock);

			create_kthread(create);

			spin_lock(&kthread_create_lock);
		}
		spin_unlock(&kthread_create_lock);
	}

	return 0;
}

void __kthread_init_worker(struct kthread_worker *worker,
			   const char *name,
			   struct lock_class_key *key)
{
	memset(worker, 0, sizeof(struct kthread_worker));
	raw_spin_lock_init(&worker->lock);
	lockdep_set_class_and_name(&worker->lock, key, name);
	INIT_LIST_HEAD(&worker->work_list);
	INIT_LIST_HEAD(&worker->delayed_work_list);
}
EXPORT_SYMBOL_GPL(__kthread_init_worker);

/**
 * kthread_worker_fn - kthread function to process kthread_worker
 * @worker_ptr: pointer to initialized kthread_worker
 *
 * This function implements the main cycle of kthread worker. It processes
 * work_list until it is stopped with kthread_stop(). It sleeps when the queue
 * is empty.
 *
 * The works are not allowed to keep any locks, disable preemption or interrupts
 * when they finish. There is a defined safe point for freezing after one work
 * finishes and before a new one is started.
 *
 * Also the works must not be handled by more than one worker at the same time,
 * see also kthread_queue_work().
 */
int kthread_worker_fn(void *worker_ptr)
{
	struct kthread_worker *worker = worker_ptr;
	struct kthread_work *work;

	/*
	 * FIXME: Update the check and remove the assignment when all kthread
	 * worker users are created using kthread_create_worker*() functions.
	 */
	WARN_ON(worker->task && worker->task != current);
	worker->task = current;

	if (worker->flags & KTW_FREEZABLE)
		set_freezable();

repeat:
	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */

	if (kthread_should_stop()) {
		__set_current_state(TASK_RUNNING);
		raw_spin_lock_irq(&worker->lock);
		worker->task = NULL;
		raw_spin_unlock_irq(&worker->lock);
		return 0;
	}

	work = NULL;
	raw_spin_lock_irq(&worker->lock);
	if (!list_empty(&worker->work_list)) {
		work = list_first_entry(&worker->work_list,
					struct kthread_work, node);
		list_del_init(&work->node);
	}
	worker->current_work = work;
	raw_spin_unlock_irq(&worker->lock);

	if (work) {
		kthread_work_func_t func = work->func;
		__set_current_state(TASK_RUNNING);
		trace_sched_kthread_work_execute_start(work);
		work->func(work);
		/*
		 * Avoid dereferencing work after this point. The trace
		 * event only cares about the address.
		 */
		trace_sched_kthread_work_execute_end(work, func);
	} else if (!freezing(current)) {
		schedule();
	} else {
		/*
		 * Handle the case where the current remains
		 * TASK_INTERRUPTIBLE. try_to_freeze() expects
		 * the current to be TASK_RUNNING.
		 */
		__set_current_state(TASK_RUNNING);
	}

	try_to_freeze();
	cond_resched();
	goto repeat;
}
EXPORT_SYMBOL_GPL(kthread_worker_fn);

static __printf(3, 0) struct kthread_worker *
__kthread_create_worker(int cpu, unsigned int flags,
			const char namefmt[], va_list args)
{
	struct kthread_worker *worker;
	struct task_struct *task;
	int node = NUMA_NO_NODE;

	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
	if (!worker)
		return ERR_PTR(-ENOMEM);

	kthread_init_worker(worker);

	if (cpu >= 0)
		node = cpu_to_node(cpu);

	task = __kthread_create_on_node(kthread_worker_fn, worker,
					node, namefmt, args);
	if (IS_ERR(task))
		goto fail_task;

	if (cpu >= 0)
		kthread_bind(task, cpu);

	worker->flags = flags;
	worker->task = task;
	wake_up_process(task);
	return worker;

fail_task:
	kfree(worker);
	return ERR_CAST(task);
}

/**
 * kthread_create_worker - create a kthread worker
 * @flags: flags modifying the default behavior of the worker
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the caller was killed by a fatal signal.
 */
struct kthread_worker *
kthread_create_worker(unsigned int flags, const char namefmt[], ...)
{
	struct kthread_worker *worker;
	va_list args;

	va_start(args, namefmt);
	worker = __kthread_create_worker(-1, flags, namefmt, args);
	va_end(args);

	return worker;
}
EXPORT_SYMBOL(kthread_create_worker);
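
/*
 * Usage sketch (illustrative only): a kthread_worker lifecycle using the
 * API above. The work item and its handler (example_work_fn) are
 * hypothetical.
 *
 *	static void example_work_fn(struct kthread_work *work) { ... }
 *	static DEFINE_KTHREAD_WORK(example_work, example_work_fn);
 *
 *	worker = kthread_create_worker(0, "example_worker");
 *	if (IS_ERR(worker))
 *		return PTR_ERR(worker);
 *	kthread_queue_work(worker, &example_work);
 *	...
 *	kthread_destroy_worker(worker);	// flushes works and stops the thread
 */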
/**
 * kthread_create_worker_on_cpu - create a kthread worker and bind it
 *	to a given CPU and the associated NUMA node.
 * @cpu: CPU number
 * @flags: flags modifying the default behavior of the worker
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Use a valid CPU number if you want to bind the kthread worker
 * to the given CPU and the associated NUMA node.
 *
 * A good practice is to add the cpu number also into the worker name.
 * For example, use kthread_create_worker_on_cpu(cpu, "helper/%d", cpu).
 *
 * CPU hotplug:
 * The kthread worker API is simple and generic. It just provides a way
 * to create, use, and destroy workers.
 *
 * It is up to the API user how to handle CPU hotplug. They have to decide
 * how to handle pending work items, prevent queuing new ones, and
 * restore the functionality when the CPU goes off and on. There are a
 * few catches:
 *
 *    - CPU affinity gets lost when it is scheduled on an offline CPU.
 *
 *    - The worker might not exist if the CPU was offline when the user
 *	created the workers.
 *
 * Good practice is to implement two CPU hotplug callbacks and to
 * destroy/create the worker when the CPU goes down/up.
 *
 * Return:
 * The pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the caller was killed by a fatal signal.
 */
struct kthread_worker *
kthread_create_worker_on_cpu(int cpu, unsigned int flags,
			     const char namefmt[], ...)
{
	struct kthread_worker *worker;
	va_list args;

	va_start(args, namefmt);
	worker = __kthread_create_worker(cpu, flags, namefmt, args);
	va_end(args);

	return worker;
}
EXPORT_SYMBOL(kthread_create_worker_on_cpu);

/*
 * Returns true when the work could not be queued at the moment.
 * It happens when it is already pending in a worker list
 * or when it is being cancelled.
 */
static inline bool queuing_blocked(struct kthread_worker *worker,
				   struct kthread_work *work)
{
	lockdep_assert_held(&worker->lock);

	return !list_empty(&work->node) || work->canceling;
}

static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
					     struct kthread_work *work)
{
	lockdep_assert_held(&worker->lock);
	WARN_ON_ONCE(!list_empty(&work->node));
	/* Do not use a work with >1 worker, see kthread_queue_work() */
	WARN_ON_ONCE(work->worker && work->worker != worker);
}

/* insert @work before @pos in @worker */
static void kthread_insert_work(struct kthread_worker *worker,
				struct kthread_work *work,
				struct list_head *pos)
{
	kthread_insert_work_sanity_check(worker, work);

	trace_sched_kthread_work_queue_work(worker, work);

	list_add_tail(&work->node, pos);
	work->worker = worker;
	if (!worker->current_work && likely(worker->task))
		wake_up_process(worker->task);
}

/**
 * kthread_queue_work - queue a kthread_work
 * @worker: target kthread_worker
 * @work: kthread_work to queue
 *
 * Queue @work for async execution on @worker. @worker must have been
 * created with kthread_create_worker(). Returns %true if @work was
 * successfully queued, %false if it was already pending.
 *
 * Reinitialize the work if it needs to be used by another worker.
 * For example, when the worker was stopped and started again.
 */
bool kthread_queue_work(struct kthread_worker *worker,
			struct kthread_work *work)
{
	bool ret = false;
	unsigned long flags;

	raw_spin_lock_irqsave(&worker->lock, flags);
	if (!queuing_blocked(worker, work)) {
		kthread_insert_work(worker, work, &worker->work_list);
		ret = true;
	}
	raw_spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_work);
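
/*
 * Usage sketch (illustrative only): embedding a kthread_work in a driver
 * structure and recovering the payload in the handler via container_of().
 * struct example_dev and example_work_fn are hypothetical.
 *
 *	struct example_dev {
 *		struct kthread_work work;
 *		...
 *	};
 *
 *	static void example_work_fn(struct kthread_work *work)
 *	{
 *		struct example_dev *dev =
 *			container_of(work, struct example_dev, work);
 *		...
 *	}
 *
 *	kthread_init_work(&dev->work, example_work_fn);
 *	kthread_queue_work(worker, &dev->work);
 */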
/**
 * kthread_delayed_work_timer_fn - callback that queues the associated kthread
 *	delayed work when the timer expires.
 * @t: pointer to the expired timer
 *
 * The format of the function is defined by struct timer_list.
 * It should have been called from irqsafe timer with irq already off.
 */
void kthread_delayed_work_timer_fn(struct timer_list *t)
{
	struct kthread_delayed_work *dwork = from_timer(dwork, t, timer);
	struct kthread_work *work = &dwork->work;
	struct kthread_worker *worker = work->worker;
	unsigned long flags;

	/*
	 * This might happen when a pending work is reinitialized.
	 * It means that it is being used the wrong way.
	 */
	if (WARN_ON_ONCE(!worker))
		return;

	raw_spin_lock_irqsave(&worker->lock, flags);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	/* Move the work from worker->delayed_work_list. */
	WARN_ON_ONCE(list_empty(&work->node));
	list_del_init(&work->node);
	if (!work->canceling)
		kthread_insert_work(worker, work, &worker->work_list);

	raw_spin_unlock_irqrestore(&worker->lock, flags);
}
EXPORT_SYMBOL(kthread_delayed_work_timer_fn);

static void __kthread_queue_delayed_work(struct kthread_worker *worker,
					 struct kthread_delayed_work *dwork,
					 unsigned long delay)
{
	struct timer_list *timer = &dwork->timer;
	struct kthread_work *work = &dwork->work;

	WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn);

	/*
	 * If @delay is 0, queue @dwork->work immediately. This is for
	 * both optimization and correctness. The earliest @timer can
	 * expire is on the closest next tick and delayed_work users depend
	 * on that there's no such delay when @delay is 0.
	 */
	if (!delay) {
		kthread_insert_work(worker, work, &worker->work_list);
		return;
	}

	/* Be paranoid and try to detect possible races already now. */
	kthread_insert_work_sanity_check(worker, work);

	list_add(&work->node, &worker->delayed_work_list);
	work->worker = worker;
	timer->expires = jiffies + delay;
	add_timer(timer);
}

/**
 * kthread_queue_delayed_work - queue the associated kthread work
 *	after a delay.
 * @worker: target kthread_worker
 * @dwork: kthread_delayed_work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If the work has not been pending it starts a timer that will queue
 * the work after the given @delay. If @delay is zero, it queues the
 * work immediately.
 *
 * Return: %false if the @work has already been pending. It means that
 * either the timer was running or the work was queued. It returns %true
 * otherwise.
 */
bool kthread_queue_delayed_work(struct kthread_worker *worker,
				struct kthread_delayed_work *dwork,
				unsigned long delay)
{
	struct kthread_work *work = &dwork->work;
	unsigned long flags;
	bool ret = false;

	raw_spin_lock_irqsave(&worker->lock, flags);

	if (!queuing_blocked(worker, work)) {
		__kthread_queue_delayed_work(worker, dwork, delay);
		ret = true;
	}

	raw_spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
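
/*
 * Usage sketch (illustrative only): delayed work. kthread_init_delayed_work()
 * wires up kthread_delayed_work_timer_fn() as the timer callback expected by
 * __kthread_queue_delayed_work(). dwork, worker and example_work_fn are
 * hypothetical.
 *
 *	struct kthread_delayed_work dwork;
 *
 *	kthread_init_delayed_work(&dwork, example_work_fn);
 *	kthread_queue_delayed_work(worker, &dwork, msecs_to_jiffies(100));
 *	...
 *	kthread_cancel_delayed_work_sync(&dwork);	// on teardown
 */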
struct kthread_flush_work {
	struct kthread_work	work;
	struct completion	done;
};

static void kthread_flush_work_fn(struct kthread_work *work)
{
	struct kthread_flush_work *fwork =
		container_of(work, struct kthread_flush_work, work);
	complete(&fwork->done);
}

/**
 * kthread_flush_work - flush a kthread_work
 * @work: work to flush
 *
 * If @work is queued or executing, wait for it to finish execution.
 */
void kthread_flush_work(struct kthread_work *work)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};
	struct kthread_worker *worker;
	bool noop = false;

	worker = work->worker;
	if (!worker)
		return;

	raw_spin_lock_irq(&worker->lock);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	if (!list_empty(&work->node))
		kthread_insert_work(worker, &fwork.work, work->node.next);
	else if (worker->current_work == work)
		kthread_insert_work(worker, &fwork.work,
				    worker->work_list.next);
	else
		noop = true;

	raw_spin_unlock_irq(&worker->lock);

	if (!noop)
		wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_work);

/*
 * Make sure that the timer is neither set nor running and could
 * not manipulate the work list_head any longer.
 *
 * The function is called under worker->lock. The lock is temporarily
 * released but the timer can't be set again in the meantime.
 */
static void kthread_cancel_delayed_work_timer(struct kthread_work *work,
					      unsigned long *flags)
{
	struct kthread_delayed_work *dwork =
		container_of(work, struct kthread_delayed_work, work);
	struct kthread_worker *worker = work->worker;

	/*
	 * del_timer_sync() must be called to make sure that the timer
	 * callback is not running. The lock must be temporarily released
	 * to avoid a deadlock with the callback. In the meantime,
	 * any queuing is blocked by setting the canceling counter.
	 */
	work->canceling++;
	raw_spin_unlock_irqrestore(&worker->lock, *flags);
	del_timer_sync(&dwork->timer);
	raw_spin_lock_irqsave(&worker->lock, *flags);
	work->canceling--;
}

/*
 * This function removes the work from the worker queue.
 *
 * It is called under worker->lock. The caller must make sure that
 * the timer used by delayed work is not running, e.g. by calling
 * kthread_cancel_delayed_work_timer().
 *
 * The work might still be in use when this function finishes. See the
 * current_work processed by the worker.
 *
 * Return: %true if @work was pending and successfully canceled,
 *	%false if @work was not pending
 */
static bool __kthread_cancel_work(struct kthread_work *work)
{
	/*
	 * Try to remove the work from a worker list. It might either
	 * be from worker->work_list or from worker->delayed_work_list.
	 */
	if (!list_empty(&work->node)) {
		list_del_init(&work->node);
		return true;
	}

	return false;
}

/**
 * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
 * @worker: kthread worker to use
 * @dwork: kthread delayed work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
 * modify @dwork's timer so that it expires after @delay. If @delay is zero,
 * @work is guaranteed to be queued immediately.
 *
 * Return: %false if @dwork was idle and queued, %true otherwise.
 *
 * A special case is when the work is being canceled in parallel.
 * It might be caused either by the real kthread_cancel_delayed_work_sync()
 * or yet another kthread_mod_delayed_work() call. We let the other command
 * win and return %true here. The return value can be used for reference
 * counting and the number of queued works stays the same. Anyway, the caller
 * is supposed to synchronize these operations a reasonable way.
 *
 * This function is safe to call from any context including IRQ handler.
 * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
 * for details.
 */
bool kthread_mod_delayed_work(struct kthread_worker *worker,
			      struct kthread_delayed_work *dwork,
			      unsigned long delay)
{
	struct kthread_work *work = &dwork->work;
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&worker->lock, flags);

	/* Do not bother with canceling when never queued. */
	if (!work->worker) {
		ret = false;
		goto fast_queue;
	}

	/* Work must not be used with >1 worker, see kthread_queue_work() */
	WARN_ON_ONCE(work->worker != worker);

	/*
	 * Temporarily cancel the work but do not fight with another command
	 * that is canceling the work as well.
	 *
	 * It is a bit tricky because of possible races with another
	 * mod_delayed_work() and cancel_delayed_work() callers.
	 *
	 * The timer must be canceled first because worker->lock is released
	 * when doing so. But the work can be removed from the queue (list)
	 * only when it can be queued again so that the return value can
	 * be used for reference counting.
	 */
	kthread_cancel_delayed_work_timer(work, &flags);
	if (work->canceling) {
		/* The number of works in the queue does not change. */
		ret = true;
		goto out;
	}
	ret = __kthread_cancel_work(work);

fast_queue:
	__kthread_queue_delayed_work(worker, dwork, delay);
out:
	raw_spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);

static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
{
	struct kthread_worker *worker = work->worker;
	unsigned long flags;
	int ret = false;

	if (!worker)
		goto out;

	raw_spin_lock_irqsave(&worker->lock, flags);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	if (is_dwork)
		kthread_cancel_delayed_work_timer(work, &flags);

	ret = __kthread_cancel_work(work);

	if (worker->current_work != work)
		goto out_fast;

	/*
	 * The work is in progress and we need to wait with the lock released.
	 * In the meantime, block any queuing by setting the canceling counter.
	 */
	work->canceling++;
	raw_spin_unlock_irqrestore(&worker->lock, flags);
	kthread_flush_work(work);
	raw_spin_lock_irqsave(&worker->lock, flags);
	work->canceling--;

out_fast:
	raw_spin_unlock_irqrestore(&worker->lock, flags);
out:
	return ret;
}

/**
 * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
 * @work: the kthread work to cancel
 *
 * Cancel @work and wait for its execution to finish. This function
 * can be used even if the work re-queues itself. On return from this
 * function, @work is guaranteed to be not pending or executing on any CPU.
 *
 * kthread_cancel_work_sync(&delayed_work->work) must not be used for
 * delayed_work's. Use kthread_cancel_delayed_work_sync() instead.
 *
 * The caller must ensure that the worker on which @work was last
 * queued can't be destroyed before this function returns.
 *
 * Return: %true if @work was pending, %false otherwise.
 */
bool kthread_cancel_work_sync(struct kthread_work *work)
{
	return __kthread_cancel_work_sync(work, false);
}
EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);

/**
 * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
 *	wait for it to finish.
 * @dwork: the kthread delayed work to cancel
 *
 * This is kthread_cancel_work_sync() for delayed works.
 *
 * Return: %true if @dwork was pending, %false otherwise.
 */
bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
{
	return __kthread_cancel_work_sync(&dwork->work, true);
}
EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);
/**
 * kthread_flush_worker - flush all current works on a kthread_worker
 * @worker: worker to flush
 *
 * Wait until all currently executing or pending works on @worker are
 * finished.
 */
void kthread_flush_worker(struct kthread_worker *worker)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};

	kthread_queue_work(worker, &fwork.work);
	wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_worker);

/**
 * kthread_destroy_worker - destroy a kthread worker
 * @worker: worker to be destroyed
 *
 * Flush and destroy @worker. The simple flush is enough because the kthread
 * worker API is used only in trivial scenarios. There are no multi-step state
 * machines needed.
 *
 * Note that this function is not responsible for handling delayed work, so
 * the caller is responsible for queuing or canceling all delayed work items
 * before invoking this function.
 */
void kthread_destroy_worker(struct kthread_worker *worker)
{
	struct task_struct *task;

	task = worker->task;
	if (WARN_ON(!task))
		return;

	kthread_flush_worker(worker);
	kthread_stop(task);
	WARN_ON(!list_empty(&worker->delayed_work_list));
	WARN_ON(!list_empty(&worker->work_list));
	kfree(worker);
}
EXPORT_SYMBOL(kthread_destroy_worker);
/**
 * kthread_use_mm - make the calling kthread operate on an address space
 * @mm: address space to operate on
 */
void kthread_use_mm(struct mm_struct *mm)
{
	struct mm_struct *active_mm;
	struct task_struct *tsk = current;

	WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
	WARN_ON_ONCE(tsk->mm);

	/*
	 * It is possible for mm to be the same as tsk->active_mm, but
	 * we must still mmgrab(mm) and mmdrop_lazy_tlb(active_mm),
	 * because these references are not equivalent.
	 */
	mmgrab(mm);

	task_lock(tsk);
	/* Hold off tlb flush IPIs while switching mm's */
	local_irq_disable();
	active_mm = tsk->active_mm;
	tsk->active_mm = mm;
	tsk->mm = mm;
	membarrier_update_current_mm(mm);
	switch_mm_irqs_off(active_mm, mm, tsk);
	local_irq_enable();
	task_unlock(tsk);
#ifdef finish_arch_post_lock_switch
	finish_arch_post_lock_switch();
#endif

	/*
	 * When a kthread starts operating on an address space, the loop
	 * in membarrier_{private,global}_expedited() may not observe
	 * that tsk->mm, and not issue an IPI. Membarrier requires a
	 * memory barrier after storing to tsk->mm, before accessing
	 * user-space memory. A full memory barrier for membarrier
	 * {PRIVATE,GLOBAL}_EXPEDITED is implicitly provided by
	 * mmdrop_lazy_tlb().
	 */
	mmdrop_lazy_tlb(active_mm);
}
EXPORT_SYMBOL_GPL(kthread_use_mm);

/**
 * kthread_unuse_mm - reverse the effect of kthread_use_mm()
 * @mm: address space to operate on
 */
void kthread_unuse_mm(struct mm_struct *mm)
{
	struct task_struct *tsk = current;

	WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
	WARN_ON_ONCE(!tsk->mm);

	task_lock(tsk);
	/*
	 * When a kthread stops operating on an address space, the loop
	 * in membarrier_{private,global}_expedited() may not observe
	 * that tsk->mm, and not issue an IPI. Membarrier requires a
	 * memory barrier after accessing user-space memory, before
	 * clearing tsk->mm.
	 */
	smp_mb__after_spinlock();
	local_irq_disable();
	tsk->mm = NULL;
	membarrier_update_current_mm(NULL);
	mmgrab_lazy_tlb(mm);
	/* active_mm is still 'mm' */
	enter_lazy_tlb(mm, tsk);
	local_irq_enable();
	task_unlock(tsk);

	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(kthread_unuse_mm);
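
/*
 * Usage sketch (illustrative only): a kthread temporarily adopting a user
 * mm, e.g. to access user mappings on behalf of a process, as
 * vhost/io_uring-style offload threads do. How the mm reference is obtained
 * and dropped here is left out and hypothetical.
 *
 *	kthread_use_mm(mm);
 *	...			// copy_from_user()/copy_to_user() now work
 *	kthread_unuse_mm(mm);
 */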
#ifdef CONFIG_BLK_CGROUP
/**
 * kthread_associate_blkcg - associate blkcg to current kthread
 * @css: the cgroup info
 *
 * Current thread must be a kthread. The thread is running jobs on behalf of
 * other threads. In some cases, we expect the jobs to attach the cgroup info
 * of the original threads instead of that of the current thread. This
 * function stores the original thread's cgroup info in the current kthread
 * context for later retrieval.
 */
void kthread_associate_blkcg(struct cgroup_subsys_state *css)
{
	struct kthread *kthread;

	if (!(current->flags & PF_KTHREAD))
		return;
	kthread = to_kthread(current);
	if (!kthread)
		return;

	if (kthread->blkcg_css) {
		css_put(kthread->blkcg_css);
		kthread->blkcg_css = NULL;
	}
	if (css) {
		css_get(css);
		kthread->blkcg_css = css;
	}
}
EXPORT_SYMBOL(kthread_associate_blkcg);

/**
 * kthread_blkcg - get associated blkcg css of current kthread
 *
 * Current thread must be a kthread.
 */
struct cgroup_subsys_state *kthread_blkcg(void)
{
	struct kthread *kthread;

	if (current->flags & PF_KTHREAD) {
		kthread = to_kthread(current);
		if (kthread)
			return kthread->blkcg_css;
	}
	return NULL;
}
#endif