treewide: Introduce kthread_run_worker[_on_cpu]()
kthread_create() creates a kthread without running it yet. kthread_run()
creates a kthread and runs it.

On the other hand, kthread_create_worker() creates a kthread worker and
runs it.

This difference in behaviours is confusing. Also there is no way to
create a kthread worker and affine it using kthread_bind_mask() or
kthread_affine_preferred() before starting it.

Consolidate the behaviours and introduce kthread_run_worker[_on_cpu]()
that behaves just like kthread_run(). kthread_create_worker[_on_cpu]()
will now only create a kthread worker without starting it.

Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Signed-off-by: Dan Carpenter <dan.carpenter@linaro.org>
parent 41f70d8e16
commit b04e317b52
33 changed files with 83 additions and 66 deletions
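Before the per-file hunks, here is a minimal sketch of the behaviour change described in the commit message. It is not part of the patch: the function names, the worker name and the affinity mask are made up for illustration, and only the kthread_run_worker(), kthread_create_worker() and kthread_bind_mask() calls reflect the API as changed below.

#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>

/*
 * One step: create the worker and wake its task immediately. This is what
 * kthread_create_worker() used to do, and what the converted callers below
 * now get from kthread_run_worker().
 */
static struct kthread_worker *example_run_worker(void)
{
        return kthread_run_worker(0, "example-worker");
}

/*
 * Two steps: create the worker, adjust the still-sleeping task (affinity,
 * priority, ...), then wake it by hand. This is the pattern the patch makes
 * possible, because kthread_create_worker() no longer starts the task.
 */
static struct kthread_worker *example_create_bound_worker(const struct cpumask *mask)
{
        struct kthread_worker *worker;

        worker = kthread_create_worker(0, "example-worker");
        if (IS_ERR(worker))
                return worker;

        kthread_bind_mask(worker->task, mask);  /* illustrative affinity */
        wake_up_process(worker->task);

        return worker;
}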
@@ -681,7 +681,7 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
 	pid_nr = pid_vnr(pid);
 	put_pid(pid);
 
-	pit->worker = kthread_create_worker(0, "kvm-pit/%d", pid_nr);
+	pit->worker = kthread_run_worker(0, "kvm-pit/%d", pid_nr);
 	if (IS_ERR(pit->worker))
 		goto fail_kthread;
 
@@ -517,7 +517,7 @@ struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
 	crypto_init_queue(&engine->queue, qlen);
 	spin_lock_init(&engine->queue_lock);
 
-	engine->kworker = kthread_create_worker(0, "%s", engine->name);
+	engine->kworker = kthread_run_worker(0, "%s", engine->name);
 	if (IS_ERR(engine->kworker)) {
 		dev_err(dev, "failed to create crypto request pump task\n");
 		return NULL;
@@ -225,7 +225,7 @@ static void __init cppc_freq_invariance_init(void)
 	if (fie_disabled)
 		return;
 
-	kworker_fie = kthread_create_worker(0, "cppc_fie");
+	kworker_fie = kthread_run_worker(0, "cppc_fie");
 	if (IS_ERR(kworker_fie)) {
 		pr_warn("%s: failed to create kworker_fie: %ld\n", __func__,
 			PTR_ERR(kworker_fie));
@@ -277,7 +277,7 @@ int drm_vblank_worker_init(struct drm_vblank_crtc *vblank)
 
 	INIT_LIST_HEAD(&vblank->pending_work);
 	init_waitqueue_head(&vblank->work_wait_queue);
-	worker = kthread_create_worker(0, "card%d-crtc%d",
+	worker = kthread_run_worker(0, "card%d-crtc%d",
 				       vblank->dev->primary->index,
 				       vblank->pipe);
 	if (IS_ERR(worker))
@@ -369,7 +369,7 @@ static int live_parallel_switch(void *arg)
 		if (!data[n].ce[0])
 			continue;
 
-		worker = kthread_create_worker(0, "igt/parallel:%s",
+		worker = kthread_run_worker(0, "igt/parallel:%s",
 					       data[n].ce[0]->engine->name);
 		if (IS_ERR(worker)) {
 			err = PTR_ERR(worker);
@@ -3574,7 +3574,7 @@ static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
 		arg[id].batch = NULL;
 		arg[id].count = 0;
 
-		worker[id] = kthread_create_worker(0, "igt/smoke:%d", id);
+		worker[id] = kthread_run_worker(0, "igt/smoke:%d", id);
 		if (IS_ERR(worker[id])) {
 			err = PTR_ERR(worker[id]);
 			break;
@@ -1025,7 +1025,7 @@ static int __igt_reset_engines(struct intel_gt *gt,
 			threads[tmp].engine = other;
 			threads[tmp].flags = flags;
 
-			worker = kthread_create_worker(0, "igt/%s",
+			worker = kthread_run_worker(0, "igt/%s",
 						       other->name);
 			if (IS_ERR(worker)) {
 				err = PTR_ERR(worker);
@@ -489,7 +489,7 @@ static int live_slpc_tile_interaction(void *arg)
 		return -ENOMEM;
 
 	for_each_gt(gt, i915, i) {
-		threads[i].worker = kthread_create_worker(0, "igt/slpc_parallel:%d", gt->info.id);
+		threads[i].worker = kthread_run_worker(0, "igt/slpc_parallel:%d", gt->info.id);
 
 		if (IS_ERR(threads[i].worker)) {
 			ret = PTR_ERR(threads[i].worker);
@@ -492,7 +492,7 @@ static int mock_breadcrumbs_smoketest(void *arg)
 	for (n = 0; n < ncpus; n++) {
 		struct kthread_worker *worker;
 
-		worker = kthread_create_worker(0, "igt/%d", n);
+		worker = kthread_run_worker(0, "igt/%d", n);
 		if (IS_ERR(worker)) {
 			ret = PTR_ERR(worker);
 			ncpus = n;
@@ -1645,7 +1645,7 @@ static int live_parallel_engines(void *arg)
 	for_each_uabi_engine(engine, i915) {
 		struct kthread_worker *worker;
 
-		worker = kthread_create_worker(0, "igt/parallel:%s",
+		worker = kthread_run_worker(0, "igt/parallel:%s",
 					       engine->name);
 		if (IS_ERR(worker)) {
 			err = PTR_ERR(worker);
@@ -1806,7 +1806,7 @@ static int live_breadcrumbs_smoketest(void *arg)
 			unsigned int i = idx * ncpus + n;
 			struct kthread_worker *worker;
 
-			worker = kthread_create_worker(0, "igt/%d.%d", idx, n);
+			worker = kthread_run_worker(0, "igt/%d.%d", idx, n);
 			if (IS_ERR(worker)) {
 				ret = PTR_ERR(worker);
 				goto out_flush;
@@ -3219,7 +3219,7 @@ static int perf_parallel_engines(void *arg)
 
 		memset(&engines[idx].p, 0, sizeof(engines[idx].p));
 
-		worker = kthread_create_worker(0, "igt:%s",
+		worker = kthread_run_worker(0, "igt:%s",
 					       engine->name);
 		if (IS_ERR(worker)) {
 			err = PTR_ERR(worker);
@@ -109,7 +109,7 @@ int msm_disp_snapshot_init(struct drm_device *drm_dev)
 
 	mutex_init(&kms->dump_mutex);
 
-	kms->dump_worker = kthread_create_worker(0, "%s", "disp_snapshot");
+	kms->dump_worker = kthread_run_worker(0, "%s", "disp_snapshot");
 	if (IS_ERR(kms->dump_worker))
 		DRM_ERROR("failed to create disp state task\n");
 
@@ -115,7 +115,7 @@ int msm_atomic_init_pending_timer(struct msm_pending_timer *timer,
 	timer->kms = kms;
 	timer->crtc_idx = crtc_idx;
 
-	timer->worker = kthread_create_worker(0, "atomic-worker-%d", crtc_idx);
+	timer->worker = kthread_run_worker(0, "atomic-worker-%d", crtc_idx);
 	if (IS_ERR(timer->worker)) {
 		int ret = PTR_ERR(timer->worker);
 		timer->worker = NULL;
@@ -859,7 +859,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 	gpu->funcs = funcs;
 	gpu->name = name;
 
-	gpu->worker = kthread_create_worker(0, "gpu-worker");
+	gpu->worker = kthread_run_worker(0, "gpu-worker");
 	if (IS_ERR(gpu->worker)) {
 		ret = PTR_ERR(gpu->worker);
 		gpu->worker = NULL;
@@ -269,7 +269,7 @@ int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv)
 		/* initialize event thread */
 		ev_thread = &priv->event_thread[drm_crtc_index(crtc)];
 		ev_thread->dev = ddev;
-		ev_thread->worker = kthread_create_worker(0, "crtc_event:%d", crtc->base.id);
+		ev_thread->worker = kthread_run_worker(0, "crtc_event:%d", crtc->base.id);
 		if (IS_ERR(ev_thread->worker)) {
 			ret = PTR_ERR(ev_thread->worker);
 			DRM_DEV_ERROR(dev, "failed to create crtc_event kthread\n");
@@ -271,7 +271,7 @@ static int wave5_vpu_probe(struct platform_device *pdev)
 		dev_err(&pdev->dev, "failed to get irq resource, falling back to polling\n");
 		hrtimer_init(&dev->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
 		dev->hrtimer.function = &wave5_vpu_timer_callback;
-		dev->worker = kthread_create_worker(0, "vpu_irq_thread");
+		dev->worker = kthread_run_worker(0, "vpu_irq_thread");
 		if (IS_ERR(dev->worker)) {
 			dev_err(&pdev->dev, "failed to create vpu irq worker\n");
 			ret = PTR_ERR(dev->worker);
@@ -394,7 +394,7 @@ static int mv88e6xxx_irq_poll_setup(struct mv88e6xxx_chip *chip)
 	kthread_init_delayed_work(&chip->irq_poll_work,
 				  mv88e6xxx_irq_poll);
 
-	chip->kworker = kthread_create_worker(0, "%s", dev_name(chip->dev));
+	chip->kworker = kthread_run_worker(0, "%s", dev_name(chip->dev));
 	if (IS_ERR(chip->kworker))
 		return PTR_ERR(chip->kworker);
 
@@ -2053,7 +2053,7 @@ static int ice_dpll_init_worker(struct ice_pf *pf)
 	struct kthread_worker *kworker;
 
 	kthread_init_delayed_work(&d->work, ice_dpll_periodic_work);
-	kworker = kthread_create_worker(0, "ice-dplls-%s",
+	kworker = kthread_run_worker(0, "ice-dplls-%s",
 					dev_name(ice_pf_to_dev(pf)));
 	if (IS_ERR(kworker))
 		return PTR_ERR(kworker);
@@ -182,7 +182,7 @@ static struct gnss_serial *ice_gnss_struct_init(struct ice_pf *pf)
 	pf->gnss_serial = gnss;
 
 	kthread_init_delayed_work(&gnss->read_work, ice_gnss_read);
-	kworker = kthread_create_worker(0, "ice-gnss-%s", dev_name(dev));
+	kworker = kthread_run_worker(0, "ice-gnss-%s", dev_name(dev));
 	if (IS_ERR(kworker)) {
 		kfree(gnss);
 		return NULL;
@@ -3080,7 +3080,7 @@ static int ice_ptp_init_work(struct ice_pf *pf, struct ice_ptp *ptp)
 	/* Allocate a kworker for handling work required for the ports
 	 * connected to the PTP hardware clock.
 	 */
-	kworker = kthread_create_worker(0, "ice-ptp-%s",
+	kworker = kthread_run_worker(0, "ice-ptp-%s",
 					dev_name(ice_pf_to_dev(pf)));
 	if (IS_ERR(kworker))
 		return PTR_ERR(kworker);
@@ -715,7 +715,7 @@ static int cros_ec_spi_devm_high_pri_alloc(struct device *dev,
 	int err;
 
 	ec_spi->high_pri_worker =
-		kthread_create_worker(0, "cros_ec_spi_high_pri");
+		kthread_run_worker(0, "cros_ec_spi_high_pri");
 
 	if (IS_ERR(ec_spi->high_pri_worker)) {
 		err = PTR_ERR(ec_spi->high_pri_worker);
@@ -296,7 +296,7 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
 
 	if (ptp->info->do_aux_work) {
 		kthread_init_delayed_work(&ptp->aux_work, ptp_aux_kworker);
-		ptp->kworker = kthread_create_worker(0, "ptp%d", ptp->index);
+		ptp->kworker = kthread_run_worker(0, "ptp%d", ptp->index);
 		if (IS_ERR(ptp->kworker)) {
 			err = PTR_ERR(ptp->kworker);
 			pr_err("failed to create ptp aux_worker %d\n", err);
@@ -2060,7 +2060,7 @@ static int spi_init_queue(struct spi_controller *ctlr)
 	ctlr->busy = false;
 	ctlr->queue_empty = true;
 
-	ctlr->kworker = kthread_create_worker(0, dev_name(&ctlr->dev));
+	ctlr->kworker = kthread_run_worker(0, dev_name(&ctlr->dev));
 	if (IS_ERR(ctlr->kworker)) {
 		dev_err(&ctlr->dev, "failed to create message pump kworker\n");
 		return PTR_ERR(ctlr->kworker);
@@ -7635,7 +7635,7 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
 	mutex_init(&port->lock);
 	mutex_init(&port->swap_lock);
 
-	port->wq = kthread_create_worker(0, dev_name(dev));
+	port->wq = kthread_run_worker(0, dev_name(dev));
 	if (IS_ERR(port->wq))
 		return ERR_CAST(port->wq);
 	sched_set_fifo(port->wq->task);
@@ -229,7 +229,7 @@ struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr,
 	dev = &vdpasim->vdpa.dev;
 
 	kthread_init_work(&vdpasim->work, vdpasim_work_fn);
-	vdpasim->worker = kthread_create_worker(0, "vDPA sim worker: %s",
+	vdpasim->worker = kthread_run_worker(0, "vDPA sim worker: %s",
 						dev_attr->name);
 	if (IS_ERR(vdpasim->worker))
 		goto err_iommu;
@@ -1229,7 +1229,7 @@ int __init watchdog_dev_init(void)
 {
 	int err;
 
-	watchdog_kworker = kthread_create_worker(0, "watchdogd");
+	watchdog_kworker = kthread_run_worker(0, "watchdogd");
 	if (IS_ERR(watchdog_kworker)) {
 		pr_err("Failed to create watchdog kworker\n");
 		return PTR_ERR(watchdog_kworker);
@@ -320,7 +320,7 @@ static void erofs_destroy_percpu_workers(void)
 static struct kthread_worker *erofs_init_percpu_worker(int cpu)
 {
 	struct kthread_worker *worker =
-		kthread_create_worker_on_cpu(cpu, 0, "erofs_worker/%u");
+		kthread_run_worker_on_cpu(cpu, 0, "erofs_worker/%u");
 
 	if (IS_ERR(worker))
 		return worker;
@@ -193,19 +193,53 @@ struct kthread_worker *kthread_create_worker_on_node(unsigned int flags,
 					     const char namefmt[], ...);
 
 #define kthread_create_worker(flags, namefmt, ...) \
-({									\
-	struct kthread_worker *__kw					\
-		= kthread_create_worker_on_node(flags, NUMA_NO_NODE,	\
-						namefmt, ## __VA_ARGS__); \
-	if (!IS_ERR(__kw))						\
-		wake_up_process(__kw->task);				\
-	__kw;								\
+	kthread_create_worker_on_node(flags, NUMA_NO_NODE, namefmt, ## __VA_ARGS__);
+
+/**
+ * kthread_run_worker - create and wake a kthread worker.
+ * @flags: flags modifying the default behavior of the worker
+ * @namefmt: printf-style name for the thread.
+ *
+ * Description: Convenient wrapper for kthread_create_worker() followed by
+ * wake_up_process(). Returns the kthread_worker or ERR_PTR(-ENOMEM).
+ */
+#define kthread_run_worker(flags, namefmt, ...)				\
+({									\
+	struct kthread_worker *__kw					\
+		= kthread_create_worker(flags, namefmt, ## __VA_ARGS__); \
+	if (!IS_ERR(__kw))						\
+		wake_up_process(__kw->task);				\
+	__kw;								\
 })
 
 struct kthread_worker *
 kthread_create_worker_on_cpu(int cpu, unsigned int flags,
 			     const char namefmt[]);
 
+/**
+ * kthread_run_worker_on_cpu - create and wake a cpu bound kthread worker.
+ * @cpu: CPU number
+ * @flags: flags modifying the default behavior of the worker
+ * @namefmt: printf-style name for the thread. Format is restricted
+ *	     to "name.*%u". Code fills in cpu number.
+ *
+ * Description: Convenient wrapper for kthread_create_worker_on_cpu()
+ * followed by wake_up_process(). Returns the kthread_worker or
+ * ERR_PTR(-ENOMEM).
+ */
+static inline struct kthread_worker *
+kthread_run_worker_on_cpu(int cpu, unsigned int flags,
+			  const char namefmt[])
+{
+	struct kthread_worker *kw;
+
+	kw = kthread_create_worker_on_cpu(cpu, flags, namefmt);
+	if (!IS_ERR(kw))
+		wake_up_process(kw->task);
+
+	return kw;
+}
+
 bool kthread_queue_work(struct kthread_worker *worker,
 			struct kthread_work *work);
 
@@ -1077,33 +1077,10 @@ kthread_create_worker_on_node(unsigned int flags, int node, const char namefmt[]
 	worker = __kthread_create_worker_on_node(flags, node, namefmt, args);
 	va_end(args);
 
-	if (worker)
-		wake_up_process(worker->task);
-
 	return worker;
 }
 EXPORT_SYMBOL(kthread_create_worker_on_node);
 
-static __printf(3, 4) struct kthread_worker *
-__kthread_create_worker_on_cpu(int cpu, unsigned int flags,
-			       const char namefmt[], ...)
-{
-	struct kthread_worker *worker;
-	va_list args;
-
-	va_start(args, namefmt);
-	worker = __kthread_create_worker_on_node(flags, cpu_to_node(cpu),
-						 namefmt, args);
-	va_end(args);
-
-	if (worker) {
-		kthread_bind(worker->task, cpu);
-		wake_up_process(worker->task);
-	}
-
-	return worker;
-}
-
 /**
  * kthread_create_worker_on_cpu - create a kthread worker and bind it
  *				  to a given CPU and the associated NUMA node.
@@ -1144,7 +1121,13 @@ struct kthread_worker *
 kthread_create_worker_on_cpu(int cpu, unsigned int flags,
 			     const char namefmt[])
 {
-	return __kthread_create_worker_on_cpu(cpu, flags, namefmt, cpu);
+	struct kthread_worker *worker;
+
+	worker = kthread_create_worker_on_node(flags, cpu_to_node(cpu), namefmt, cpu);
+	if (!IS_ERR(worker))
+		kthread_bind(worker->task, cpu);
+
+	return worker;
 }
 EXPORT_SYMBOL(kthread_create_worker_on_cpu);
 
@@ -4906,7 +4906,7 @@ static void rcu_spawn_exp_par_gp_kworker(struct rcu_node *rnp)
 	if (rnp->exp_kworker)
 		return;
 
-	kworker = kthread_create_worker(0, name, rnp_index);
+	kworker = kthread_run_worker(0, name, rnp_index);
 	if (IS_ERR_OR_NULL(kworker)) {
 		pr_err("Failed to create par gp kworker on %d/%d\n",
 		       rnp->grplo, rnp->grphi);
@@ -4933,7 +4933,7 @@ static void __init rcu_start_exp_gp_kworker(void)
 	const char *name = "rcu_exp_gp_kthread_worker";
 	struct sched_param param = { .sched_priority = kthread_prio };
 
-	rcu_exp_gp_kworker = kthread_create_worker(0, name);
+	rcu_exp_gp_kworker = kthread_run_worker(0, name);
 	if (IS_ERR_OR_NULL(rcu_exp_gp_kworker)) {
 		pr_err("Failed to create %s!\n", name);
 		rcu_exp_gp_kworker = NULL;
@@ -5352,7 +5352,7 @@ static struct kthread_worker *scx_create_rt_helper(const char *name)
 {
 	struct kthread_worker *helper;
 
-	helper = kthread_create_worker(0, name);
+	helper = kthread_run_worker(0, name);
 	if (helper)
 		sched_set_fifo(helper->task);
 	return helper;
@@ -7828,7 +7828,7 @@ static void __init wq_cpu_intensive_thresh_init(void)
 	unsigned long thresh;
 	unsigned long bogo;
 
-	pwq_release_worker = kthread_create_worker(0, "pool_workqueue_release");
+	pwq_release_worker = kthread_run_worker(0, "pool_workqueue_release");
 	BUG_ON(IS_ERR(pwq_release_worker));
 
 	/* if the user set it to a specific value, keep it */
@@ -66,7 +66,7 @@ static int ksz_connect(struct dsa_switch *ds)
 	if (!priv)
 		return -ENOMEM;
 
-	xmit_worker = kthread_create_worker(0, "dsa%d:%d_xmit",
+	xmit_worker = kthread_run_worker(0, "dsa%d:%d_xmit",
 					    ds->dst->index, ds->index);
 	if (IS_ERR(xmit_worker)) {
 		ret = PTR_ERR(xmit_worker);
@@ -110,7 +110,7 @@ static int ocelot_connect(struct dsa_switch *ds)
 	if (!priv)
 		return -ENOMEM;
 
-	priv->xmit_worker = kthread_create_worker(0, "felix_xmit");
+	priv->xmit_worker = kthread_run_worker(0, "felix_xmit");
 	if (IS_ERR(priv->xmit_worker)) {
 		err = PTR_ERR(priv->xmit_worker);
 		kfree(priv);
@@ -707,7 +707,7 @@ static int sja1105_connect(struct dsa_switch *ds)
 
 	spin_lock_init(&priv->meta_lock);
 
-	xmit_worker = kthread_create_worker(0, "dsa%d:%d_xmit",
+	xmit_worker = kthread_run_worker(0, "dsa%d:%d_xmit",
 					    ds->dst->index, ds->index);
 	if (IS_ERR(xmit_worker)) {
 		err = PTR_ERR(xmit_worker);