Home
last modified time | relevance | path

Searched refs:tsk (Results 1 – 25 of 25) sorted by relevance

/drivers/dma/bestcomm/
fec.c:87 struct bcom_task *tsk; in bcom_fec_rx_init() local
90 tsk = bcom_task_alloc(queue_len, sizeof(struct bcom_fec_bd), in bcom_fec_rx_init()
92 if (!tsk) in bcom_fec_rx_init()
95 tsk->flags = BCOM_FLAGS_NONE; in bcom_fec_rx_init()
97 priv = tsk->priv; in bcom_fec_rx_init()
101 if (bcom_fec_rx_reset(tsk)) { in bcom_fec_rx_init()
102 bcom_task_free(tsk); in bcom_fec_rx_init()
106 return tsk; in bcom_fec_rx_init()
111 bcom_fec_rx_reset(struct bcom_task *tsk) in bcom_fec_rx_reset() argument
113 struct bcom_fec_priv *priv = tsk->priv; in bcom_fec_rx_reset()
[all …]
gen_bd.c:88 struct bcom_task *tsk; in bcom_gen_bd_rx_init() local
91 tsk = bcom_task_alloc(queue_len, sizeof(struct bcom_gen_bd), in bcom_gen_bd_rx_init()
93 if (!tsk) in bcom_gen_bd_rx_init()
96 tsk->flags = BCOM_FLAGS_NONE; in bcom_gen_bd_rx_init()
98 priv = tsk->priv; in bcom_gen_bd_rx_init()
104 if (bcom_gen_bd_rx_reset(tsk)) { in bcom_gen_bd_rx_init()
105 bcom_task_free(tsk); in bcom_gen_bd_rx_init()
109 return tsk; in bcom_gen_bd_rx_init()
114 bcom_gen_bd_rx_reset(struct bcom_task *tsk) in bcom_gen_bd_rx_reset() argument
116 struct bcom_gen_bd_priv *priv = tsk->priv; in bcom_gen_bd_rx_reset()
[all …]
ata.c:60 struct bcom_task *tsk; in bcom_ata_init() local
67 tsk = bcom_task_alloc(queue_len, sizeof(struct bcom_ata_bd), 0); in bcom_ata_init()
68 if (!tsk) in bcom_ata_init()
71 tsk->flags = BCOM_FLAGS_NONE; in bcom_ata_init()
73 bcom_ata_reset_bd(tsk); in bcom_ata_init()
75 var = (struct bcom_ata_var *) bcom_task_var(tsk->tasknum); in bcom_ata_init()
76 inc = (struct bcom_ata_inc *) bcom_task_inc(tsk->tasknum); in bcom_ata_init()
78 if (bcom_load_image(tsk->tasknum, bcom_ata_task)) { in bcom_ata_init()
79 bcom_task_free(tsk); in bcom_ata_init()
84 offsetof(struct mpc52xx_sdma, tcr[tsk->tasknum]); in bcom_ata_init()
[all …]
bestcomm.c:53 struct bcom_task *tsk; in bcom_task_alloc() local
75 tsk = kzalloc(sizeof(struct bcom_task) + priv_size, GFP_KERNEL); in bcom_task_alloc()
76 if (!tsk) in bcom_task_alloc()
79 tsk->tasknum = tasknum; in bcom_task_alloc()
81 tsk->priv = (void*)tsk + sizeof(struct bcom_task); in bcom_task_alloc()
84 tsk->irq = irq_of_parse_and_map(bcom_eng->ofnode, tsk->tasknum); in bcom_task_alloc()
85 if (!tsk->irq) in bcom_task_alloc()
90 tsk->cookie = kmalloc_array(bd_count, sizeof(void *), in bcom_task_alloc()
92 if (!tsk->cookie) in bcom_task_alloc()
95 tsk->bd = bcom_sram_alloc(bd_count * bd_size, 4, &tsk->bd_pa); in bcom_task_alloc()
[all …]
/drivers/dma-buf/
st-dma-fence-chain.c:571 struct task_struct *tsk; in wait_forward() local
579 tsk = kthread_run(__wait_fence_chains, &fc, "dmabuf/wait"); in wait_forward()
580 if (IS_ERR(tsk)) { in wait_forward()
581 err = PTR_ERR(tsk); in wait_forward()
584 get_task_struct(tsk); in wait_forward()
585 yield_to(tsk, true); in wait_forward()
590 err = kthread_stop(tsk); in wait_forward()
591 put_task_struct(tsk); in wait_forward()
601 struct task_struct *tsk; in wait_backward() local
609 tsk = kthread_run(__wait_fence_chains, &fc, "dmabuf/wait"); in wait_backward()
[all …]
/drivers/gpu/drm/i915/gt/
selftest_migrate.c:331 struct task_struct *tsk; member
350 struct task_struct *tsk; in threaded_migrate() local
356 tsk = kthread_run(fn, &thread[i], "igt-%d", i); in threaded_migrate()
357 if (IS_ERR(tsk)) { in threaded_migrate()
358 err = PTR_ERR(tsk); in threaded_migrate()
362 get_task_struct(tsk); in threaded_migrate()
363 thread[i].tsk = tsk; in threaded_migrate()
369 struct task_struct *tsk = thread[i].tsk; in threaded_migrate() local
372 if (IS_ERR_OR_NULL(tsk)) in threaded_migrate()
375 status = kthread_stop(tsk); in threaded_migrate()
[all …]
selftest_hangcheck.c:1435 struct task_struct *tsk = NULL; in __igt_reset_evict_vma() local
1542 tsk = kthread_run(fn, &arg, "igt/evict_vma"); in __igt_reset_evict_vma()
1543 if (IS_ERR(tsk)) { in __igt_reset_evict_vma()
1544 err = PTR_ERR(tsk); in __igt_reset_evict_vma()
1546 tsk = NULL; in __igt_reset_evict_vma()
1549 get_task_struct(tsk); in __igt_reset_evict_vma()
1568 if (tsk) { in __igt_reset_evict_vma()
1573 err = kthread_stop(tsk); in __igt_reset_evict_vma()
1575 put_task_struct(tsk); in __igt_reset_evict_vma()
/drivers/powercap/
idle_inject.c:56 struct task_struct *tsk; member
94 wake_up_process(iit->tsk); in idle_inject_wakeup()
257 wait_task_inactive(iit->tsk, 0); in idle_inject_stop()
359 .store = &idle_inject_thread.tsk,
/drivers/tty/
tty_ldsem.c:77 struct task_struct *tsk; in __ldsem_wake_readers() local
95 tsk = waiter->task; in __ldsem_wake_readers()
97 wake_up_process(tsk); in __ldsem_wake_readers()
98 put_task_struct(tsk); in __ldsem_wake_readers()
tty_jobctrl.c:345 struct task_struct *tsk = current; in no_tty() local
348 proc_clear_tty(tsk); in no_tty()
/drivers/net/ppp/
ppp_synctty.c:70 struct tasklet_struct tsk; member
180 tasklet_setup(&ap->tsk, ppp_sync_process); in ppp_sync_open()
234 tasklet_kill(&ap->tsk); in ppp_sync_close()
347 tasklet_schedule(&ap->tsk); in ppp_sync_receive()
361 tasklet_schedule(&ap->tsk); in ppp_sync_wakeup()
490 struct syncppp *ap = from_tasklet(ap, t, tsk); in ppp_sync_process()
ppp_async.c:66 struct tasklet_struct tsk; member
182 tasklet_setup(&ap->tsk, ppp_async_process); in ppp_asynctty_open()
235 tasklet_kill(&ap->tsk); in ppp_asynctty_close()
354 tasklet_schedule(&ap->tsk); in ppp_asynctty_receive()
368 tasklet_schedule(&ap->tsk); in ppp_asynctty_wakeup()
498 struct asyncppp *ap = from_tasklet(ap, t, tsk); in ppp_async_process()
/drivers/pcmcia/
cs.c:105 struct task_struct *tsk; in pcmcia_register_socket() local
169 tsk = kthread_run(pccardd, socket, "pccardd"); in pcmcia_register_socket()
170 if (IS_ERR(tsk)) { in pcmcia_register_socket()
171 ret = PTR_ERR(tsk); in pcmcia_register_socket()
/drivers/scsi/qla2xxx/
qla_mbx.c:3328 struct tsk_mgmt_entry tsk; member
3338 struct tsk_mgmt_cmd *tsk; in __qla24xx_issue_tmf() local
3359 tsk = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma); in __qla24xx_issue_tmf()
3360 if (tsk == NULL) { in __qla24xx_issue_tmf()
3366 tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE; in __qla24xx_issue_tmf()
3367 tsk->p.tsk.entry_count = 1; in __qla24xx_issue_tmf()
3368 tsk->p.tsk.handle = make_handle(req->id, tsk->p.tsk.handle); in __qla24xx_issue_tmf()
3369 tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id); in __qla24xx_issue_tmf()
3370 tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); in __qla24xx_issue_tmf()
3371 tsk->p.tsk.control_flags = cpu_to_le32(type); in __qla24xx_issue_tmf()
[all …]
qla_iocb.c:2541 qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk) in qla24xx_tm_iocb() argument
2554 tsk->entry_type = TSK_MGMT_IOCB_TYPE; in qla24xx_tm_iocb()
2555 tsk->entry_count = 1; in qla24xx_tm_iocb()
2556 tsk->handle = make_handle(req->id, tsk->handle); in qla24xx_tm_iocb()
2557 tsk->nport_handle = cpu_to_le16(fcport->loop_id); in qla24xx_tm_iocb()
2558 tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); in qla24xx_tm_iocb()
2559 tsk->control_flags = cpu_to_le32(flags); in qla24xx_tm_iocb()
2560 tsk->port_id[0] = fcport->d_id.b.al_pa; in qla24xx_tm_iocb()
2561 tsk->port_id[1] = fcport->d_id.b.area; in qla24xx_tm_iocb()
2562 tsk->port_id[2] = fcport->d_id.b.domain; in qla24xx_tm_iocb()
[all …]
qla_isr.c:2469 qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk) in qla24xx_tm_iocb_entry() argument
2476 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk; in qla24xx_tm_iocb_entry()
2479 sp = qla2x00_get_sp_from_handle(vha, func, req, tsk); in qla24xx_tm_iocb_entry()
2545 void *tsk, srb_t *sp) in qla24xx_nvme_iocb_entry() argument
2549 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk; in qla24xx_nvme_iocb_entry()
/drivers/gpu/drm/i915/
i915_request.c:1782 struct task_struct *tsk; member
1789 wake_up_process(fetch_and_zero(&wait->tsk)); in request_wait_wake()
1876 wait.tsk = current; in i915_request_wait()
1918 if (READ_ONCE(wait.tsk)) in i915_request_wait()
/drivers/android/
binder_internal.h:442 struct task_struct *tsk; member
binder.c:2920 if (t1->to_proc->tsk == t2->to_proc->tsk && t1->code == t2->code && in binder_can_update_transaction()
3403 t->sender_euid = task_euid(proc->tsk); in binder_transaction()
4881 struct task_struct *sender = t_from->proc->tsk; in binder_thread_read()
5145 put_task_struct(proc->tsk); in binder_free_proc()
5810 if (proc->tsk != current->group_leader) in binder_mmap()
5853 proc->tsk = current->group_leader; in binder_open()
/drivers/base/power/
main.c:500 struct task_struct *tsk; member
520 show_stack(wd->tsk, NULL, KERN_EMERG); in dpm_watchdog_handler()
535 wd->tsk = current; in dpm_watchdog_set()
/drivers/scsi/ibmvscsi_tgt/
ibmvscsi_tgt.c:2379 struct srp_tsk_mgmt *tsk; in ibmvscsis_srp_cmd() local
2413 tsk = &vio_iu(iue)->srp.tsk_mgmt; in ibmvscsis_srp_cmd()
2415 tsk->tag, tsk->tag); in ibmvscsis_srp_cmd()
2416 cmd->rsp.tag = tsk->tag; in ibmvscsis_srp_cmd()
/drivers/md/
md.h:690 struct task_struct *tsk; member
md.c:499 WARN_ON_ONCE(mddev->thread && current == mddev->thread->tsk); in mddev_suspend()
6371 wake_up_process(mddev->sync_thread->tsk); in md_set_readonly()
6434 wake_up_process(mddev->sync_thread->tsk); in do_md_stop()
7982 pr_debug("md: waking up MD thread %s.\n", thread->tsk->comm); in md_wakeup_thread()
8003 thread->tsk = kthread_run(md_thread, thread, in md_register_thread()
8007 if (IS_ERR(thread->tsk)) { in md_register_thread()
8032 pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk)); in md_unregister_thread()
8033 kthread_stop(thread->tsk); in md_unregister_thread()
raid5-cache.c:1584 kthread_park(log->reclaim_thread->tsk); in r5l_quiesce()
1588 kthread_unpark(log->reclaim_thread->tsk); in r5l_quiesce()
raid10.c:987 if (conf->mddev->thread->tsk == current) { in stop_waiting_barrier()