Searched refs:tsk (Results 1 – 25 of 25) sorted by relevance

/drivers/dma/bestcomm/
fec.c
83 struct bcom_task *tsk; in bcom_fec_rx_init() local
86 tsk = bcom_task_alloc(queue_len, sizeof(struct bcom_fec_bd), in bcom_fec_rx_init()
88 if (!tsk) in bcom_fec_rx_init()
91 tsk->flags = BCOM_FLAGS_NONE; in bcom_fec_rx_init()
93 priv = tsk->priv; in bcom_fec_rx_init()
97 if (bcom_fec_rx_reset(tsk)) { in bcom_fec_rx_init()
98 bcom_task_free(tsk); in bcom_fec_rx_init()
102 return tsk; in bcom_fec_rx_init()
107 bcom_fec_rx_reset(struct bcom_task *tsk) in bcom_fec_rx_reset() argument
109 struct bcom_fec_priv *priv = tsk->priv; in bcom_fec_rx_reset()
[all …]
gen_bd.c
88 struct bcom_task *tsk; in bcom_gen_bd_rx_init() local
91 tsk = bcom_task_alloc(queue_len, sizeof(struct bcom_gen_bd), in bcom_gen_bd_rx_init()
93 if (!tsk) in bcom_gen_bd_rx_init()
96 tsk->flags = BCOM_FLAGS_NONE; in bcom_gen_bd_rx_init()
98 priv = tsk->priv; in bcom_gen_bd_rx_init()
104 if (bcom_gen_bd_rx_reset(tsk)) { in bcom_gen_bd_rx_init()
105 bcom_task_free(tsk); in bcom_gen_bd_rx_init()
109 return tsk; in bcom_gen_bd_rx_init()
114 bcom_gen_bd_rx_reset(struct bcom_task *tsk) in bcom_gen_bd_rx_reset() argument
116 struct bcom_gen_bd_priv *priv = tsk->priv; in bcom_gen_bd_rx_reset()
[all …]
ata.c
56 struct bcom_task *tsk; in bcom_ata_init() local
63 tsk = bcom_task_alloc(queue_len, sizeof(struct bcom_ata_bd), 0); in bcom_ata_init()
64 if (!tsk) in bcom_ata_init()
67 tsk->flags = BCOM_FLAGS_NONE; in bcom_ata_init()
69 bcom_ata_reset_bd(tsk); in bcom_ata_init()
71 var = (struct bcom_ata_var *) bcom_task_var(tsk->tasknum); in bcom_ata_init()
72 inc = (struct bcom_ata_inc *) bcom_task_inc(tsk->tasknum); in bcom_ata_init()
74 if (bcom_load_image(tsk->tasknum, bcom_ata_task)) { in bcom_ata_init()
75 bcom_task_free(tsk); in bcom_ata_init()
80 offsetof(struct mpc52xx_sdma, tcr[tsk->tasknum]); in bcom_ata_init()
[all …]
bestcomm.c
51 struct bcom_task *tsk; in bcom_task_alloc() local
73 tsk = kzalloc(sizeof(struct bcom_task) + priv_size, GFP_KERNEL); in bcom_task_alloc()
74 if (!tsk) in bcom_task_alloc()
77 tsk->tasknum = tasknum; in bcom_task_alloc()
79 tsk->priv = (void*)tsk + sizeof(struct bcom_task); in bcom_task_alloc()
82 tsk->irq = irq_of_parse_and_map(bcom_eng->ofnode, tsk->tasknum); in bcom_task_alloc()
83 if (!tsk->irq) in bcom_task_alloc()
88 tsk->cookie = kmalloc_array(bd_count, sizeof(void *), in bcom_task_alloc()
90 if (!tsk->cookie) in bcom_task_alloc()
93 tsk->bd = bcom_sram_alloc(bd_count * bd_size, 4, &tsk->bd_pa); in bcom_task_alloc()
[all …]
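
All four bestcomm hits share one lifecycle: bcom_task_alloc() returns a struct bcom_task with a driver-private area appended behind it, the caller resets the task's microcode state, and bcom_task_free() unwinds on failure. A minimal sketch of that pattern, assuming only the bcom_* signatures visible in the snippets above; my_fec_rx_init() is hypothetical and the header path varies across kernel versions.

    /* sketch; header location differs between kernel versions */
    #include <linux/fsl/bestcomm/bestcomm.h>
    #include <linux/fsl/bestcomm/fec.h>

    static struct bcom_task *my_fec_rx_init(int queue_len, int priv_size)
    {
            struct bcom_task *tsk;

            /* one buffer descriptor per slot; priv_size bytes follow the task */
            tsk = bcom_task_alloc(queue_len, sizeof(struct bcom_fec_bd),
                                  priv_size);
            if (!tsk)
                    return NULL;

            tsk->flags = BCOM_FLAGS_NONE;

            /* reset the DMA task; the task must be freed again on failure */
            if (bcom_fec_rx_reset(tsk)) {
                    bcom_task_free(tsk);
                    return NULL;
            }
            return tsk;
    }
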
/drivers/dma-buf/
st-dma-fence-chain.c
575 struct task_struct *tsk; in wait_forward() local
583 tsk = kthread_run(__wait_fence_chains, &fc, "dmabuf/wait"); in wait_forward()
584 if (IS_ERR(tsk)) { in wait_forward()
585 err = PTR_ERR(tsk); in wait_forward()
588 get_task_struct(tsk); in wait_forward()
589 yield_to(tsk, true); in wait_forward()
594 err = kthread_stop(tsk); in wait_forward()
595 put_task_struct(tsk); in wait_forward()
605 struct task_struct *tsk; in wait_backward() local
613 tsk = kthread_run(__wait_fence_chains, &fc, "dmabuf/wait"); in wait_backward()
[all …]
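
wait_forward() and wait_backward() drive the same kthread pattern: spawn the waiter with kthread_run(), pin it with get_task_struct() so kthread_stop() cannot race with the thread exiting, hand it the CPU with yield_to(), then reap its return value. A stripped-down sketch of that sequence; run_waiter() and its arguments are illustrative, standing in for __wait_fence_chains and the fence-chain context.

    #include <linux/err.h>
    #include <linux/kthread.h>
    #include <linux/sched.h>
    #include <linux/sched/task.h>

    static int run_waiter(int (*fn)(void *), void *data)
    {
            struct task_struct *tsk;
            int err;

            tsk = kthread_run(fn, data, "dmabuf/wait");
            if (IS_ERR(tsk))
                    return PTR_ERR(tsk);

            get_task_struct(tsk);     /* keep the task valid past its exit */
            yield_to(tsk, true);      /* let the waiter run first */

            /* ... signal the fences the waiter is blocked on ... */

            err = kthread_stop(tsk);  /* returns fn's return value */
            put_task_struct(tsk);
            return err;
    }
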
/drivers/gpu/drm/i915/gt/
selftest_migrate.c
531 struct task_struct *tsk; member
550 struct task_struct *tsk; in threaded_migrate() local
556 tsk = kthread_run(fn, &thread[i], "igt-%d", i); in threaded_migrate()
557 if (IS_ERR(tsk)) { in threaded_migrate()
558 err = PTR_ERR(tsk); in threaded_migrate()
562 get_task_struct(tsk); in threaded_migrate()
563 thread[i].tsk = tsk; in threaded_migrate()
569 struct task_struct *tsk = thread[i].tsk; in threaded_migrate() local
572 if (IS_ERR_OR_NULL(tsk)) in threaded_migrate()
575 status = kthread_stop(tsk); in threaded_migrate()
[all …]
selftest_hangcheck.c
1448 struct task_struct *tsk = NULL; in __igt_reset_evict_vma() local
1557 tsk = kthread_run(fn, &arg, "igt/evict_vma"); in __igt_reset_evict_vma()
1558 if (IS_ERR(tsk)) { in __igt_reset_evict_vma()
1559 err = PTR_ERR(tsk); in __igt_reset_evict_vma()
1561 tsk = NULL; in __igt_reset_evict_vma()
1564 get_task_struct(tsk); in __igt_reset_evict_vma()
1583 if (tsk) { in __igt_reset_evict_vma()
1588 err = kthread_stop(tsk); in __igt_reset_evict_vma()
1590 put_task_struct(tsk); in __igt_reset_evict_vma()
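
The i915 selftests extend that to a pool of workers: each successfully started thread is pinned and recorded in thread[i].tsk, and the teardown loop tests IS_ERR_OR_NULL() so slots that never started are skipped while the first failure code is kept. A condensed sketch of threaded_migrate()'s start/stop loops; run_threads() and struct igt_thread are simplified stand-ins, and the array is assumed zero-initialized.

    struct igt_thread {
            struct task_struct *tsk;
            /* per-thread arguments live here in the real selftest */
    };

    static int run_threads(struct igt_thread *thread, int n, int (*fn)(void *))
    {
            int i, err = 0;

            for (i = 0; i < n; i++) {
                    struct task_struct *tsk;

                    tsk = kthread_run(fn, &thread[i], "igt-%d", i);
                    if (IS_ERR(tsk)) {
                            err = PTR_ERR(tsk);
                            break;            /* remaining slots stay NULL */
                    }
                    get_task_struct(tsk);
                    thread[i].tsk = tsk;
            }

            for (i = 0; i < n; i++) {
                    struct task_struct *tsk = thread[i].tsk;
                    int status;

                    if (IS_ERR_OR_NULL(tsk))
                            continue;         /* never started */

                    status = kthread_stop(tsk);
                    if (status && !err)
                            err = status;
                    put_task_struct(tsk);
            }
            return err;
    }
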
/drivers/powercap/
idle_inject.c
56 struct task_struct *tsk; member
94 wake_up_process(iit->tsk); in idle_inject_wakeup()
261 wait_task_inactive(iit->tsk, TASK_ANY); in idle_inject_stop()
366 .store = &idle_inject_thread.tsk,
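
Here tsk is one injection thread per CPU, managed by smpboot: the .store field tells smpboot where to record each CPU's task_struct, idle_inject_wakeup() kicks a thread with wake_up_process(), and idle_inject_stop() waits with wait_task_inactive() until it has gone back to sleep. A minimal registration in the same shape; the my_* names and the callback bodies are placeholders.

    #include <linux/percpu.h>
    #include <linux/smpboot.h>

    struct my_thread_info {
            struct task_struct *tsk;
    };

    static DEFINE_PER_CPU(struct my_thread_info, my_thread);

    static int my_should_run(unsigned int cpu)
    {
            return 0;       /* placeholder: report pending work here */
    }

    static void my_thread_fn(unsigned int cpu)
    {
            /* placeholder: inject idle time, then return to sleep */
    }

    static struct smp_hotplug_thread my_threads = {
            .store             = &my_thread.tsk,  /* smpboot fills one task per CPU */
            .thread_should_run = my_should_run,
            .thread_fn         = my_thread_fn,
            .thread_comm       = "my_inject/%u",
    };

    /* at init time: smpboot_register_percpu_thread(&my_threads); */
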
/drivers/net/ppp/
ppp_synctty.c
70 struct tasklet_struct tsk; member
180 tasklet_setup(&ap->tsk, ppp_sync_process); in ppp_sync_open()
234 tasklet_kill(&ap->tsk); in ppp_sync_close()
345 tasklet_schedule(&ap->tsk); in ppp_sync_receive()
359 tasklet_schedule(&ap->tsk); in ppp_sync_wakeup()
488 struct syncppp *ap = from_tasklet(ap, t, tsk); in ppp_sync_process()
ppp_async.c
66 struct tasklet_struct tsk; member
182 tasklet_setup(&ap->tsk, ppp_async_process); in ppp_asynctty_open()
235 tasklet_kill(&ap->tsk); in ppp_asynctty_close()
352 tasklet_schedule(&ap->tsk); in ppp_asynctty_receive()
366 tasklet_schedule(&ap->tsk); in ppp_asynctty_wakeup()
496 struct asyncppp *ap = from_tasklet(ap, t, tsk); in ppp_async_process()
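
In both PPP line disciplines tsk is not a task at all but a tasklet. tasklet_setup() binds it to the process function at open, the receive and wakeup paths defer work with tasklet_schedule(), tasklet_kill() at close guarantees it can no longer run, and the handler recovers its container with from_tasklet(). The lifecycle in miniature, with struct syncppp trimmed to the one relevant field:

    #include <linux/interrupt.h>

    struct syncppp {
            struct tasklet_struct tsk;
            /* the real struct also holds the tty, queues, and locks */
    };

    static void ppp_sync_process(struct tasklet_struct *t)
    {
            /* map the tasklet back to its containing struct */
            struct syncppp *ap = from_tasklet(ap, t, tsk);

            /* drain the receive queue, push pending transmit data, ... */
    }

    static void example_open(struct syncppp *ap)
    {
            tasklet_setup(&ap->tsk, ppp_sync_process);
    }

    static void example_receive(struct syncppp *ap)
    {
            tasklet_schedule(&ap->tsk);  /* run the handler later, in softirq */
    }

    static void example_close(struct syncppp *ap)
    {
            tasklet_kill(&ap->tsk);      /* wait out any in-flight run */
    }
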
/drivers/tty/
tty_ldsem.c
77 struct task_struct *tsk; in __ldsem_wake_readers() local
95 tsk = waiter->task; in __ldsem_wake_readers()
97 wake_up_process(tsk); in __ldsem_wake_readers()
98 put_task_struct(tsk); in __ldsem_wake_readers()
tty_jobctrl.c
345 struct task_struct *tsk = current; in no_tty() local
348 proc_clear_tty(tsk); in no_tty()
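
tty_ldsem.c shows the careful way to wake a sleeper: the sleeper takes a reference on itself before queuing, and the waker snapshots waiter->task, publishes NULL (after which the sleeper may free its waiter), wakes the task, and drops that reference. The two halves in isolation; struct my_waiter and the omitted list/locking machinery are assumptions.

    #include <linux/sched.h>
    #include <linux/sched/task.h>

    struct my_waiter {
            struct list_head list;
            struct task_struct *task;
    };

    /* sleeper side: pin ourselves before anyone can wake us */
    static void my_wait_prepare(struct my_waiter *w)
    {
            w->task = current;
            get_task_struct(current);
            /* then: queue w, set_current_state(), schedule() */
    }

    /* waker side: claim, publish, wake, unpin */
    static void my_wake_one(struct my_waiter *w)
    {
            struct task_struct *tsk = w->task;

            smp_store_release(&w->task, NULL);  /* sleeper may now free w */
            wake_up_process(tsk);
            put_task_struct(tsk);
    }
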
/drivers/pcmcia/
cs.c
105 struct task_struct *tsk; in pcmcia_register_socket() local
169 tsk = kthread_run(pccardd, socket, "pccardd"); in pcmcia_register_socket()
170 if (IS_ERR(tsk)) { in pcmcia_register_socket()
171 ret = PTR_ERR(tsk); in pcmcia_register_socket()
/drivers/scsi/qla2xxx/
qla_mbx.c
3334 struct tsk_mgmt_entry tsk; member
3344 struct tsk_mgmt_cmd *tsk; in __qla24xx_issue_tmf() local
3365 tsk = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma); in __qla24xx_issue_tmf()
3366 if (tsk == NULL) { in __qla24xx_issue_tmf()
3372 tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE; in __qla24xx_issue_tmf()
3373 tsk->p.tsk.entry_count = 1; in __qla24xx_issue_tmf()
3374 tsk->p.tsk.handle = make_handle(req->id, tsk->p.tsk.handle); in __qla24xx_issue_tmf()
3375 tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id); in __qla24xx_issue_tmf()
3376 tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); in __qla24xx_issue_tmf()
3377 tsk->p.tsk.control_flags = cpu_to_le32(type); in __qla24xx_issue_tmf()
[all …]
qla_iocb.c
2541 qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk) in qla24xx_tm_iocb() argument
2554 tsk->entry_type = TSK_MGMT_IOCB_TYPE; in qla24xx_tm_iocb()
2555 tsk->entry_count = 1; in qla24xx_tm_iocb()
2556 tsk->handle = make_handle(req->id, tsk->handle); in qla24xx_tm_iocb()
2557 tsk->nport_handle = cpu_to_le16(fcport->loop_id); in qla24xx_tm_iocb()
2558 tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); in qla24xx_tm_iocb()
2559 tsk->control_flags = cpu_to_le32(flags); in qla24xx_tm_iocb()
2560 tsk->port_id[0] = fcport->d_id.b.al_pa; in qla24xx_tm_iocb()
2561 tsk->port_id[1] = fcport->d_id.b.area; in qla24xx_tm_iocb()
2562 tsk->port_id[2] = fcport->d_id.b.domain; in qla24xx_tm_iocb()
[all …]
qla_isr.c
2495 qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk) in qla24xx_tm_iocb_entry() argument
2502 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk; in qla24xx_tm_iocb_entry()
2505 sp = qla2x00_get_sp_from_handle(vha, func, req, tsk); in qla24xx_tm_iocb_entry()
2571 void *tsk, srb_t *sp) in qla24xx_nvme_iocb_entry() argument
2575 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk; in qla24xx_nvme_iocb_entry()
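
The qla2xxx hits are a different tsk again: a task-management IOCB sent to the HBA firmware. __qla24xx_issue_tmf() allocates the command from a DMA pool and fills the wire format field by field, converting every multi-byte value with cpu_to_le16()/cpu_to_le32() because the firmware is little-endian. A sketch of that fill; struct tmf_entry is a deliberately simplified stand-in for struct tsk_mgmt_entry, and the field values mirror the snippets above.

    #include <linux/types.h>
    #include <asm/byteorder.h>

    /* simplified stand-in; the real struct tsk_mgmt_entry is larger */
    struct tmf_entry {
            u8      entry_type;
            u8      entry_count;
            __le16  nport_handle;
            __le16  timeout;
            __le32  control_flags;
            u8      port_id[3];
    };

    static void fill_tmf(struct tmf_entry *tsk, u8 entry_type, u16 loop_id,
                         u16 r_a_tov, u32 flags, u8 al_pa, u8 area, u8 domain)
    {
            tsk->entry_type    = entry_type;    /* TSK_MGMT_IOCB_TYPE */
            tsk->entry_count   = 1;
            tsk->nport_handle  = cpu_to_le16(loop_id);
            /* timeout derived from the resource-allocation timeout, as above */
            tsk->timeout       = cpu_to_le16(r_a_tov / 10 * 2);
            tsk->control_flags = cpu_to_le32(flags);
            /* FC destination ID is stored low byte first: AL_PA, area, domain */
            tsk->port_id[0] = al_pa;
            tsk->port_id[1] = area;
            tsk->port_id[2] = domain;
    }
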
/drivers/android/
binder_internal.h
436 struct task_struct *tsk; member
binder.c
1321 trace_android_vh_binder_new_ref(proc->tsk, new_ref->data.desc, new_ref->node->debug_id); in binder_get_ref_for_node_olocked()
1489 trace_android_vh_binder_del_ref(ref->proc ? ref->proc->tsk : NULL, in binder_free_ref()
2903 if (t1->to_proc->tsk == t2->to_proc->tsk && t1->code == t2->code && in binder_can_update_transaction()
2998 trace_android_vh_binder_proc_transaction(current, proc->tsk, in binder_proc_transaction()
3415 t->sender_euid = task_euid(proc->tsk); in binder_transaction()
4915 struct task_struct *sender = t_from->proc->tsk; in binder_thread_read()
5181 put_task_struct(proc->tsk); in binder_free_proc()
5867 if (proc->tsk != current->group_leader) in binder_mmap()
5906 proc->tsk = current->group_leader; in binder_open()
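
In binder, tsk is a long-lived process reference: binder_open() pins current->group_leader (the process, not the opening thread) into proc->tsk, binder_mmap() rejects mappings from any other process, and binder_free_proc() finally drops the reference. The reference discipline, reduced to its core with a hypothetical my_proc:

    #include <linux/errno.h>
    #include <linux/sched.h>
    #include <linux/sched/task.h>

    struct my_proc {
            struct task_struct *tsk;   /* the owning process's group leader */
    };

    static void my_open(struct my_proc *proc)
    {
            /* pin the whole process, not just the thread that opened us */
            get_task_struct(current->group_leader);
            proc->tsk = current->group_leader;
    }

    static int my_mmap_check(struct my_proc *proc)
    {
            /* only the owning process may map the device */
            if (proc->tsk != current->group_leader)
                    return -EINVAL;
            return 0;
    }

    static void my_free(struct my_proc *proc)
    {
            put_task_struct(proc->tsk);
    }
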
/drivers/gpu/drm/i915/
i915_request.c
1928 struct task_struct *tsk; member
1935 wake_up_process(fetch_and_zero(&wait->tsk)); in request_wait_wake()
2026 wait.tsk = current; in i915_request_wait_timeout()
2068 if (READ_ONCE(wait.tsk)) in i915_request_wait_timeout()
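
i915_request_wait_timeout() publishes current in wait.tsk; the completion callback claims that pointer atomically (fetch_and_zero() is an i915 xchg() wrapper), so exactly one side wakes the task, and the waiter checks with READ_ONCE() whether it has been claimed yet. The same handshake written with plain xchg(); struct my_wait is an assumption.

    #include <linux/atomic.h>
    #include <linux/sched.h>

    struct my_wait {
            struct task_struct *tsk;
    };

    /* callback side: claim the waiter exactly once, then wake it */
    static void my_wait_wake(struct my_wait *wait)
    {
            struct task_struct *tsk = xchg(&wait->tsk, NULL);

            if (tsk)
                    wake_up_process(tsk);
    }

    /* waiter side: a non-NULL tsk means the callback has not fired yet */
    static bool my_still_waiting(struct my_wait *wait)
    {
            return READ_ONCE(wait->tsk) != NULL;
    }
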
/drivers/base/power/
main.c
499 struct task_struct *tsk; member
519 show_stack(wd->tsk, NULL, KERN_EMERG); in dpm_watchdog_handler()
534 wd->tsk = current; in dpm_watchdog_set()
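
The PM-core watchdog records the suspending task in wd->tsk when it arms a timer; if a device callback stalls past the deadline, the handler dumps that task's kernel stack with show_stack() before escalating. A minimal arm/fire pair under those assumptions (the real code also panics and uses a tunable timeout):

    #include <linux/jiffies.h>
    #include <linux/printk.h>
    #include <linux/sched/debug.h>
    #include <linux/timer.h>

    struct my_watchdog {
            struct task_struct *tsk;
            struct timer_list timer;
    };

    static void my_watchdog_handler(struct timer_list *t)
    {
            struct my_watchdog *wd = from_timer(wd, t, timer);

            /* dump the stuck task's kernel stack at emergency loglevel */
            show_stack(wd->tsk, NULL, KERN_EMERG);
    }

    static void my_watchdog_set(struct my_watchdog *wd, unsigned int seconds)
    {
            wd->tsk = current;
            timer_setup(&wd->timer, my_watchdog_handler, 0);
            mod_timer(&wd->timer, jiffies + seconds * HZ);
    }
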
/drivers/scsi/ibmvscsi_tgt/
ibmvscsi_tgt.c
2376 struct srp_tsk_mgmt *tsk; in ibmvscsis_srp_cmd() local
2410 tsk = &vio_iu(iue)->srp.tsk_mgmt; in ibmvscsis_srp_cmd()
2412 tsk->tag, tsk->tag); in ibmvscsis_srp_cmd()
2413 cmd->rsp.tag = tsk->tag; in ibmvscsis_srp_cmd()
/drivers/md/
md.h
707 struct task_struct *tsk; member
md.c
480 WARN_ON_ONCE(mddev->thread && current == mddev->thread->tsk); in mddev_suspend()
6379 wake_up_process(mddev->sync_thread->tsk); in md_set_readonly()
6442 wake_up_process(mddev->sync_thread->tsk); in do_md_stop()
7990 pr_debug("md: waking up MD thread %s.\n", thread->tsk->comm); in md_wakeup_thread()
8011 thread->tsk = kthread_run(md_thread, thread, in md_register_thread()
8015 if (IS_ERR(thread->tsk)) { in md_register_thread()
8040 pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk)); in md_unregister_thread()
8041 kthread_stop(thread->tsk); in md_unregister_thread()
raid5-cache.c
1583 kthread_park(log->reclaim_thread->tsk); in r5l_quiesce()
1587 kthread_unpark(log->reclaim_thread->tsk); in r5l_quiesce()
raid10.c
1002 if (conf->mddev->thread->tsk == current) { in stop_waiting_barrier()
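
md wraps its kthreads in struct md_thread, and everything else pokes the embedded tsk: wake_up_process() to kick it (md_wakeup_thread), kthread_park()/kthread_unpark() to quiesce the raid5-cache reclaim thread, kthread_stop() to unregister, and comparisons against current to ask "am I the MD thread?". A stripped-down shell showing those touchpoints; the wait queue, flags, and real thread body are omitted, and the my_* wrappers are illustrative.

    #include <linux/err.h>
    #include <linux/kthread.h>
    #include <linux/sched.h>

    struct md_thread {
            struct task_struct *tsk;
            /* real struct: wait queue, flags, timeout, mddev back-pointer */
    };

    static int md_thread_fn(void *arg)
    {
            while (!kthread_should_stop()) {
                    /* the real thread sleeps on a wait queue for work */
                    schedule_timeout_interruptible(HZ);
            }
            return 0;
    }

    static int my_register(struct md_thread *thread, const char *name)
    {
            thread->tsk = kthread_run(md_thread_fn, thread, "%s", name);
            if (IS_ERR(thread->tsk)) {
                    int err = PTR_ERR(thread->tsk);

                    thread->tsk = NULL;
                    return err;
            }
            return 0;
    }

    static void my_wakeup(struct md_thread *thread)
    {
            wake_up_process(thread->tsk);   /* cf. md_wakeup_thread() */
    }

    static void my_unregister(struct md_thread *thread)
    {
            kthread_stop(thread->tsk);      /* cf. md_unregister_thread() */
    }
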