From f293ea92007419e4f9c52db0cf57af17f45b9f94 Mon Sep 17 00:00:00 2001
From: Oleg Nesterov
Date: Wed, 9 May 2007 02:34:10 -0700
Subject: workqueue: don't save interrupts in run_workqueue()

work->func() may sleep, so it is a bug to call run_workqueue() with irqs disabled.

Signed-off-by: Oleg Nesterov
Cc: Ingo Molnar
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 kernel/workqueue.c | 18 ++++++++----------
 1 file changed, 8 insertions(+), 10 deletions(-)

(limited to 'kernel/workqueue.c')

diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index ea422254f8bf..74f3f7825229 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -227,13 +227,7 @@ EXPORT_SYMBOL_GPL(queue_delayed_work_on);
 
 static void run_workqueue(struct cpu_workqueue_struct *cwq)
 {
-	unsigned long flags;
-
-	/*
-	 * Keep taking off work from the queue until
-	 * done.
-	 */
-	spin_lock_irqsave(&cwq->lock, flags);
+	spin_lock_irq(&cwq->lock);
 	cwq->run_depth++;
 	if (cwq->run_depth > 3) {
 		/* morton gets to eat his hat */
@@ -248,7 +242,7 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
 
 		cwq->current_work = work;
 		list_del_init(cwq->worklist.next);
-		spin_unlock_irqrestore(&cwq->lock, flags);
+		spin_unlock_irq(&cwq->lock);
 
 		BUG_ON(get_wq_data(work) != cwq);
 		if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
@@ -266,11 +260,11 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
 			dump_stack();
 		}
 
-		spin_lock_irqsave(&cwq->lock, flags);
+		spin_lock_irq(&cwq->lock);
 		cwq->current_work = NULL;
 	}
 	cwq->run_depth--;
-	spin_unlock_irqrestore(&cwq->lock, flags);
+	spin_unlock_irq(&cwq->lock);
 }
 
 /*
@@ -399,6 +393,8 @@ static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
  */
 void fastcall flush_workqueue(struct workqueue_struct *wq)
 {
+	might_sleep();
+
 	if (is_single_threaded(wq))
 		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, singlethread_cpu));
 	else {
@@ -445,6 +441,8 @@ void flush_work(struct workqueue_struct *wq, struct work_struct *work)
 {
 	struct cpu_workqueue_struct *cwq;
 
+	might_sleep();
+
 	cwq = get_wq_data(work);
 	/* Was it ever queued ? */
 	if (!cwq)
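
Background on the locking change above: spin_lock_irqsave()/spin_unlock_irqrestore() preserve whatever interrupt state the caller had, while spin_lock_irq()/spin_unlock_irq() assume interrupts were enabled on entry and re-enable them unconditionally on unlock. Since work->func() may sleep, run_workqueue() must only ever be entered with interrupts enabled, so saving and restoring the flags was dead weight; the added might_sleep() calls in the flush paths make a violation of that rule show up as a warning in debug builds. The sketch below is a minimal, hypothetical userspace analogy (plain C with pthreads, made-up toy_* names), not kernel code, showing the same two ideas.

/*
 * Hypothetical userspace analogy (NOT kernel code): all toy_* names are made up.
 * Idea 1: a function that may sleep checks that "irqs" are enabled (like
 *         might_sleep()) so buggy callers are caught early.
 * Idea 2: because callers must have "irqs" enabled, the work loop can use the
 *         unconditional disable/enable lock variant instead of save/restore.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static _Thread_local bool toy_irqs_disabled;   /* stand-in for local IRQ state */
static pthread_mutex_t toy_lock = PTHREAD_MUTEX_INITIALIZER;

/* might_sleep()-style check: warn if a sleeping call happens with "irqs" off. */
static void toy_might_sleep(const char *where)
{
	if (toy_irqs_disabled)
		fprintf(stderr, "BUG: sleeping call from irq-off context: %s\n", where);
}

/* spin_lock_irq()-style helpers: disable/enable unconditionally, no saved flags. */
static void toy_spin_lock_irq(void)
{
	toy_irqs_disabled = true;
	pthread_mutex_lock(&toy_lock);
}

static void toy_spin_unlock_irq(void)
{
	pthread_mutex_unlock(&toy_lock);
	toy_irqs_disabled = false;
}

/* Stand-in for work->func(): it may sleep, so it asserts the invariant. */
static void toy_work_func(void)
{
	toy_might_sleep(__func__);
	/* ... would block here, e.g. waiting for I/O ... */
}

/* run_workqueue()-style loop: drop the lock and re-enable before each item. */
static void toy_run_workqueue(int nr_items)
{
	toy_spin_lock_irq();
	while (nr_items-- > 0) {
		toy_spin_unlock_irq();
		toy_work_func();          /* runs with "irqs" enabled */
		toy_spin_lock_irq();
	}
	toy_spin_unlock_irq();
}

int main(void)
{
	toy_run_workqueue(3);             /* invariant holds, no output */

	toy_irqs_disabled = true;         /* simulate a buggy irq-off caller */
	toy_might_sleep("flush path");    /* prints the BUG: line */
	return 0;
}

Compile with something like "cc -pthread toy_workqueue.c". The deliberately buggy call at the end prints the BUG line, which is roughly the behaviour might_sleep() gives you in a kernel built with sleep-in-atomic debugging enabled.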