pm: introduce new interfaces schedule_work_on() and queue_work_on()
This interface allows adding a job on a specific CPU. Although a work struct queued on a CPU will be moved to another CPU if that CPU goes offline, there is a recursion problem when a work item tries to offline the CPU it is running on; in that case we need to schedule the work onto a specific CPU.

http://bugzilla.kernel.org/show_bug.cgi?id=10897

[oleg@tv-sign.ru: cleanups]
Signed-off-by: Zhang Rui <rui.zhang@intel.com>
Tested-by: Rus <harbour@sfinx.od.ua>
Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
Acked-by: Pavel Machek <pavel@ucw.cz>
Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 0d83304c7e
commit c1a220e7ac
2 changed files with 41 additions and 1 deletion
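A minimal usage sketch of the new interface (not part of this patch; the names pm_offline_fn, pm_offline_work and kick_offline_handler are hypothetical): a handler that must not run on the CPU it is about to take down can be pinned to a known-good CPU instead of relying on schedule_work().

#include <linux/workqueue.h>

/* Hypothetical work item; in the PM case this would drive the CPU-offline path. */
static void pm_offline_fn(struct work_struct *work)
{
	/* Runs in keventd context on the CPU it was queued to. */
}

static DECLARE_WORK(pm_offline_work, pm_offline_fn);

static void kick_offline_handler(void)
{
	/*
	 * Queue on CPU 0 rather than on whichever CPU we happen to be
	 * running on, so the handler cannot end up offlining itself.
	 */
	schedule_work_on(0, &pm_offline_work);
}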
include/linux/workqueue.h
@@ -179,6 +179,8 @@ __create_workqueue_key(const char *name, int singlethread,
 extern void destroy_workqueue(struct workqueue_struct *wq);
 
 extern int queue_work(struct workqueue_struct *wq, struct work_struct *work);
+extern int queue_work_on(int cpu, struct workqueue_struct *wq,
+			struct work_struct *work);
 extern int queue_delayed_work(struct workqueue_struct *wq,
 			struct delayed_work *work, unsigned long delay);
 extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
@@ -188,6 +190,7 @@ extern void flush_workqueue(struct workqueue_struct *wq);
 extern void flush_scheduled_work(void);
 
 extern int schedule_work(struct work_struct *work);
+extern int schedule_work_on(int cpu, struct work_struct *work);
 extern int schedule_delayed_work(struct delayed_work *work, unsigned long delay);
 extern int schedule_delayed_work_on(int cpu, struct delayed_work *work,
 					unsigned long delay);

kernel/workqueue.c
@@ -140,7 +140,6 @@ static void insert_work(struct cpu_workqueue_struct *cwq,
 	wake_up(&cwq->more_work);
 }
 
-/* Preempt must be disabled. */
 static void __queue_work(struct cpu_workqueue_struct *cwq,
 			 struct work_struct *work)
 {
@@ -175,6 +174,31 @@ int queue_work(struct workqueue_struct *wq, struct work_struct *work)
 }
 EXPORT_SYMBOL_GPL(queue_work);
 
+/**
+ * queue_work_on - queue work on specific cpu
+ * @cpu: CPU number to execute work on
+ * @wq: workqueue to use
+ * @work: work to queue
+ *
+ * Returns 0 if @work was already on a queue, non-zero otherwise.
+ *
+ * We queue the work to a specific CPU, the caller must ensure it
+ * can't go away.
+ */
+int
+queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
+{
+	int ret = 0;
+
+	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
+		BUG_ON(!list_empty(&work->entry));
+		__queue_work(wq_per_cpu(wq, cpu), work);
+		ret = 1;
+	}
+	return ret;
+}
+EXPORT_SYMBOL_GPL(queue_work_on);
+
 static void delayed_work_timer_fn(unsigned long __data)
 {
 	struct delayed_work *dwork = (struct delayed_work *)__data;
@@ -553,6 +577,19 @@ int schedule_work(struct work_struct *work)
 }
 EXPORT_SYMBOL(schedule_work);
 
+/*
+ * schedule_work_on - put work task on a specific cpu
+ * @cpu: cpu to put the work task on
+ * @work: job to be done
+ *
+ * This puts a job on a specific cpu
+ */
+int schedule_work_on(int cpu, struct work_struct *work)
+{
+	return queue_work_on(cpu, keventd_wq, work);
+}
+EXPORT_SYMBOL(schedule_work_on);
+
 /**
  * schedule_delayed_work - put work task in global workqueue after delay
  * @dwork: job to be done
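The kernel-doc for queue_work_on() notes that the caller must ensure the target CPU cannot go away. A hedged sketch of one way a caller might arrange that (my_wq, my_work and queue_on_cpu_pinned are hypothetical; this assumes the get_online_cpus()/put_online_cpus() hotplug helpers already present in the tree at this point):

#include <linux/cpu.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;	/* created elsewhere with create_workqueue() */
static struct work_struct my_work;	/* initialised elsewhere with INIT_WORK() */

static int queue_on_cpu_pinned(int cpu)
{
	int queued = 0;

	get_online_cpus();		/* hold off CPU hotplug while we queue */
	if (cpu_online(cpu))
		queued = queue_work_on(cpu, my_wq, &my_work);
	put_online_cpus();

	return queued;			/* 1 if queued, 0 if already pending or CPU offline */
}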