Searched refs:worker (Results 1 – 25 of 32) sorted by relevance

/drivers/infiniband/hw/qib/
qib_cq.c 108 struct kthread_worker *worker; in qib_cq_enter() local
114 worker = cq->dd->worker; in qib_cq_enter()
115 if (likely(worker)) { in qib_cq_enter()
118 queue_kthread_work(worker, &cq->comptask); in qib_cq_enter()
502 if (dd->worker) in qib_cq_init()
504 dd->worker = kzalloc(sizeof(*dd->worker), GFP_KERNEL); in qib_cq_init()
505 if (!dd->worker) in qib_cq_init()
507 init_kthread_worker(dd->worker); in qib_cq_init()
510 dd->worker, in qib_cq_init()
522 kfree(dd->worker); in qib_cq_init()
[all …]
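
The qib_cq.c hits above show the allocated-kthread_worker pattern: a worker object kzalloc'ed at init time, serviced by a dedicated kernel thread, with completion tasks queued onto it. Below is a minimal sketch of that shape using the pre-4.9 API names that appear in these results (init_kthread_worker(), queue_kthread_work()); the example_dev structure and function names are placeholders, not taken from the driver.

/* Sketch only: mirrors the qib_cq.c pattern; names are illustrative. */
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/slab.h>

struct example_dev {
	struct kthread_worker *worker;	/* shared worker thread context */
	struct kthread_work task;	/* one queued unit of work */
};

static void example_task_fn(struct kthread_work *work)
{
	/* runs in the dedicated worker thread, process context */
}

static int example_init(struct example_dev *dd)
{
	struct task_struct *thread;

	dd->worker = kzalloc(sizeof(*dd->worker), GFP_KERNEL);
	if (!dd->worker)
		return -ENOMEM;

	init_kthread_worker(dd->worker);
	init_kthread_work(&dd->task, example_task_fn);

	/* kthread_worker_fn() drains the worker until kthread_stop() */
	thread = kthread_run(kthread_worker_fn, dd->worker, "example_worker");
	if (IS_ERR(thread)) {
		kfree(dd->worker);
		dd->worker = NULL;
		return PTR_ERR(thread);
	}
	/* teardown (kthread_stop() on the thread, kfree of the worker) omitted */
	return 0;
}

static void example_enter(struct example_dev *dd)
{
	if (likely(dd->worker))
		queue_kthread_work(dd->worker, &dd->task);
}
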
/drivers/gpu/drm/
drm_flip_work.c 62 queue_work(wq, &work->worker); in drm_flip_work_commit()
68 struct drm_flip_work *work = container_of(w, struct drm_flip_work, worker); in flip_worker()
107 INIT_WORK(&work->worker, flip_worker); in drm_flip_work_init()
/drivers/platform/olpc/
olpc-ec.c 34 struct work_struct worker; member
68 struct olpc_ec_priv *ec = container_of(w, struct olpc_ec_priv, worker); in olpc_ec_worker()
94 schedule_work(&ec->worker); in olpc_ec_worker()
112 schedule_work(&ec->worker); in queue_ec_descriptor()
272 INIT_WORK(&ec->worker, olpc_ec_worker); in olpc_ec_probe()
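
olpc-ec.c (like drm_flip_work.c above and many of the hits below) uses the plain work_struct idiom: the work item is embedded in the driver's private structure, the handler recovers that structure with container_of(), and events simply schedule_work() the item. A short sketch follows; my_priv and the function names are invented for illustration.

#include <linux/workqueue.h>

struct my_priv {
	struct work_struct worker;	/* embedded in the private data */
	/* ... driver state ... */
};

static void my_worker(struct work_struct *w)
{
	struct my_priv *priv = container_of(w, struct my_priv, worker);

	/* heavy lifting in process context, using priv */
}

static int my_probe(struct my_priv *priv)
{
	INIT_WORK(&priv->worker, my_worker);
	return 0;
}

static void my_event(struct my_priv *priv)
{
	/* callable from atomic context; defers to the system workqueue */
	schedule_work(&priv->worker);
}
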
/drivers/macintosh/ams/
ams-core.c 78 schedule_work(&ams_info.worker); in ams_handle_irq()
198 INIT_WORK(&ams_info.worker, ams_worker); in ams_init()
229 flush_work(&ams_info.worker); in ams_sensor_detach()
ams.h 33 struct work_struct worker; member
/drivers/vhost/
vhost.c 164 wake_up_process(dev->worker); in vhost_work_queue()
309 dev->worker = NULL; in vhost_dev_init()
369 struct task_struct *worker; in vhost_dev_set_owner() local
380 worker = kthread_create(vhost_worker, dev, "vhost-%d", current->pid); in vhost_dev_set_owner()
381 if (IS_ERR(worker)) { in vhost_dev_set_owner()
382 err = PTR_ERR(worker); in vhost_dev_set_owner()
386 dev->worker = worker; in vhost_dev_set_owner()
387 wake_up_process(worker); /* avoid contributing to loadavg */ in vhost_dev_set_owner()
399 kthread_stop(worker); in vhost_dev_set_owner()
400 dev->worker = NULL; in vhost_dev_set_owner()
[all …]
vhost.h 125 struct task_struct *worker; member
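
vhost.c does not use a workqueue at all: it creates one task_struct per device with kthread_create(), wakes it explicitly, and tears it down with kthread_stop(). A hedged sketch of that shape is below; the loop body and all names are placeholders (the real vhost_worker() dequeues vhost_work items from a list).

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>

struct my_dev {
	struct task_struct *worker;
};

static int my_worker_fn(void *data)
{
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			break;
		}
		/* dequeue and run pending work here, otherwise sleep */
		schedule();
	}
	return 0;
}

static int my_set_owner(struct my_dev *dev)
{
	struct task_struct *worker;

	worker = kthread_create(my_worker_fn, dev, "my-worker-%d", current->pid);
	if (IS_ERR(worker))
		return PTR_ERR(worker);

	dev->worker = worker;
	wake_up_process(worker);	/* kthread_create() leaves the thread stopped */
	return 0;
}

static void my_cleanup(struct my_dev *dev)
{
	if (dev->worker) {
		kthread_stop(dev->worker);
		dev->worker = NULL;
	}
}
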
/drivers/hid/
uhid.c 56 struct work_struct worker; member
63 struct uhid_device *uhid = container_of(work, struct uhid_device, worker); in uhid_device_add_worker()
534 schedule_work(&uhid->worker); in uhid_dev_create2()
578 cancel_work_sync(&uhid->worker); in uhid_dev_destroy()
642 INIT_WORK(&uhid->worker, uhid_device_add_worker); in uhid_char_open()
hid-wiimote.h 110 struct work_struct worker; member
hid-wiimote-core.c 47 worker); in wiimote_queue_worker()
104 schedule_work(&wdata->queue.worker); in wiimote_queue()
1734 INIT_WORK(&wdata->queue.worker, wiimote_queue_worker); in wiimote_create()
1768 cancel_work_sync(&wdata->queue.worker); in wiimote_destroy()
hid-sony.c 1857 void (*worker)(struct work_struct *)) in sony_init_work()
1860 INIT_WORK(&sc->state_worker, worker); in sony_init_work()
/drivers/s390/net/
qeth_l2_main.c 1477 struct work_struct worker; member
1485 container_of(work, struct qeth_bridge_state_data, worker); in qeth_bridge_state_change_worker()
1540 INIT_WORK(&data->worker, qeth_bridge_state_change_worker); in qeth_bridge_state_change()
1544 queue_work(qeth_wq, &data->worker); in qeth_bridge_state_change()
1548 struct work_struct worker; member
1556 container_of(work, struct qeth_bridge_host_data, worker); in qeth_bridge_host_event_worker()
1612 INIT_WORK(&data->worker, qeth_bridge_host_event_worker); in qeth_bridge_host_event()
1616 queue_work(qeth_wq, &data->worker); in qeth_bridge_host_event()
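
The qeth_l2_main.c hits show a fire-and-forget variant: each bridge event allocates its own structure with an embedded work_struct, queues it on a driver workqueue, and the handler frees it when done. A sketch, assuming a hypothetical my_event_data and my_wq:

#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_event_data {
	struct work_struct worker;
	int payload;			/* whatever the event carries */
};

static void my_event_worker(struct work_struct *work)
{
	struct my_event_data *data =
		container_of(work, struct my_event_data, worker);

	/* act on data->payload ... */
	kfree(data);			/* the work item owns its allocation */
}

static void my_event(struct workqueue_struct *my_wq, int payload)
{
	struct my_event_data *data;

	data = kzalloc(sizeof(*data), GFP_ATOMIC);	/* may be called in atomic context */
	if (!data)
		return;

	data->payload = payload;
	INIT_WORK(&data->worker, my_event_worker);
	queue_work(my_wq, &data->worker);
}
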
/drivers/block/
nvme-core.c 82 struct kthread_worker *worker; member
242 queue_kthread_work(cmdinfo->worker, &cmdinfo->work); in async_completion()
2346 struct kthread_worker *worker; member
2366 send_sig(SIGKILL, dq->worker->task, 1); in nvme_wait_dq()
2367 flush_kthread_worker(dq->worker); in nvme_wait_dq()
2451 DEFINE_KTHREAD_WORKER_ONSTACK(worker); in nvme_disable_io_queues()
2454 &worker, "nvme%d", dev->instance); in nvme_disable_io_queues()
2466 dq.worker = &worker; in nvme_disable_io_queues()
2473 nvmeq->cmdinfo.worker = dq.worker; in nvme_disable_io_queues()
2475 queue_kthread_work(dq.worker, &nvmeq->cmdinfo.work); in nvme_disable_io_queues()
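
nvme_disable_io_queues() uses a kthread_worker that lives on the caller's stack: DEFINE_KTHREAD_WORKER_ONSTACK() declares it, a thread is started to drain it, delete requests are queued as kthread_work items, and flush_kthread_worker() waits for them before the thread is stopped. A condensed sketch under the same pre-4.9 names; everything besides the API calls is invented.

#include <linux/err.h>
#include <linux/kthread.h>

static void my_delete_fn(struct kthread_work *work)
{
	/* issue one delete command and wait for its completion */
}

static int my_disable_queues(void)
{
	DEFINE_KTHREAD_WORKER_ONSTACK(worker);
	struct kthread_work work;
	struct task_struct *kworker_task;

	kworker_task = kthread_run(kthread_worker_fn, &worker, "my_kworker");
	if (IS_ERR(kworker_task))
		return PTR_ERR(kworker_task);

	init_kthread_work(&work, my_delete_fn);
	queue_kthread_work(&worker, &work);

	flush_kthread_worker(&worker);	/* wait until the queued work has run */
	kthread_stop(kworker_task);
	return 0;
}
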
/drivers/media/usb/hdpvr/
hdpvr.h 112 struct work_struct worker; member
hdpvr-video.c 254 worker); in hdpvr_transmit_buffers()
318 INIT_WORK(&dev->worker, hdpvr_transmit_buffers); in hdpvr_start_streaming()
319 queue_work(dev->workqueue, &dev->worker); in hdpvr_start_streaming()
/drivers/s390/block/
dasd_alias.c 153 INIT_WORK(&lcu->suc_data.worker, summary_unit_check_handling_work); in _allocate_lcu()
265 cancel_work_sync(&lcu->suc_data.worker); in dasd_alias_disconnect_device_from_lcu()
917 worker); in summary_unit_check_handling_work()
999 if (!schedule_work(&lcu->suc_data.worker)) in dasd_alias_handle_summary_unit_check()
dasd_eckd.h 462 struct work_struct worker; member
dasd_eckd.c 106 struct work_struct worker; member
118 struct work_struct worker; member
1254 data = container_of(work, struct path_verification_work_data, worker); in do_path_verification_work()
1428 INIT_WORK(&data->worker, do_path_verification_work); in dasd_eckd_verify_path()
1432 schedule_work(&data->worker); in dasd_eckd_verify_path()
4806 data = container_of(work, struct check_attention_work_data, worker); in dasd_eckd_check_attention_work()
4837 INIT_WORK(&data->worker, dasd_eckd_check_attention_work); in dasd_eckd_check_attention()
4841 schedule_work(&data->worker); in dasd_eckd_check_attention()
/drivers/staging/speakup/
TODO 21 in these cases. Pushing work to some worker thread would probably help,
/drivers/block/drbd/
drbd_main.c 2218 struct work_struct worker; member
2226 struct retry_worker *retry = container_of(ws, struct retry_worker, worker); in do_retry()
2290 queue_work(retry.wq, &retry.worker); in drbd_restart_request()
2572 connection->worker.reset_cpu_mask = 1; in set_resource_options()
2658 drbd_thread_init(resource, &connection->worker, drbd_worker, "worker"); in conn_create()
2659 connection->worker.connection = connection; in conn_create()
2715 INIT_WORK(&device->submit.worker, do_submit); in init_submitter()
2964 INIT_WORK(&retry.worker, do_retry); in drbd_init()
3578 D_ASSERT(device, current == first_peer_device(device)->connection->worker.task); in drbd_queue_bitmap_io()
3616 D_ASSERT(device, current != first_peer_device(device)->connection->worker.task); in drbd_bitmap_io()
drbd_nl.c 338 if (current == connection->worker.task) in drbd_khelper()
365 if (current == connection->worker.task) in drbd_khelper()
1231 drbd_thread_start(&connection->worker); in conn_reconfig_start()
1247 drbd_thread_stop(&connection->worker); in conn_reconfig_done()
3582 drbd_thread_stop(&connection->worker); in drbd_adm_down()
3635 drbd_thread_stop(&connection->worker); in drbd_adm_del_resource()
/drivers/md/
dm-era-target.c 1153 struct work_struct worker; member
1207 queue_work(era->wq, &era->worker); in wake_worker()
1311 struct era *era = container_of(ws, struct era, worker); in do_work()
1506 INIT_WORK(&era->worker, do_work); in era_ctr()
dm-thin.c 179 struct work_struct worker; member
257 queue_work(pool->wq, &pool->worker); in wake_worker()
1672 struct pool *pool = container_of(ws, struct pool, worker); in do_worker()
1707 struct work_struct worker; member
1713 return container_of(ws, struct pool_work, worker); in to_pool_work()
1724 INIT_WORK_ONSTACK(&pw->worker, fn); in pool_work_wait()
1726 queue_work(pool->wq, &pw->worker); in pool_work_wait()
2229 INIT_WORK(&pool->worker, do_worker); in pool_create()
raid5.c 5159 struct r5worker *worker, in handle_active_stripes() argument
5203 struct r5worker *worker = container_of(work, struct r5worker, work); in raid5_do_work() local
5204 struct r5worker_group *group = worker->group; in raid5_do_work()
5218 released = release_stripe_list(conf, worker->temp_inactive_list); in raid5_do_work()
5220 batch_size = handle_active_stripes(conf, group_id, worker, in raid5_do_work()
5221 worker->temp_inactive_list); in raid5_do_work()
5222 worker->working = false; in raid5_do_work()
5581 struct r5worker *worker = group->workers + j; in alloc_thread_groups() local
5582 worker->group = group; in alloc_thread_groups()
5583 INIT_WORK(&worker->work, raid5_do_work); in alloc_thread_groups()
[all …]
dm-cache-target.c 258 struct work_struct worker; member
341 queue_work(cache->wq, &cache->worker); in wake_worker()
1728 struct cache *cache = container_of(ws, struct cache, worker); in do_worker()
2344 INIT_WORK(&cache->worker, do_worker); in cache_create()
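
Among the /drivers/md/ hits, dm-thin.c's pool_work_wait() is worth calling out: an on-stack work_struct paired with a completion lets the caller run a function on the pool's workqueue and block until it has finished. A sketch with placeholder names (my_work, my_work_wait); the real helper also takes the work function as a parameter.

#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_work {
	struct work_struct worker;	/* lives on the caller's stack */
	struct completion complete;
};

static struct my_work *to_my_work(struct work_struct *ws)
{
	return container_of(ws, struct my_work, worker);
}

static void my_work_fn(struct work_struct *ws)
{
	struct my_work *mw = to_my_work(ws);

	/* do the actual job here, then signal the waiter */
	complete(&mw->complete);
}

static void my_work_wait(struct workqueue_struct *wq, struct my_work *mw)
{
	INIT_WORK_ONSTACK(&mw->worker, my_work_fn);
	init_completion(&mw->complete);
	queue_work(wq, &mw->worker);
	wait_for_completion(&mw->complete);
}
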
