| /drivers/gpu/drm/ |
| D | drm_flip_work.c |
|      46  static void drm_flip_work_queue_task(struct drm_flip_work *work, struct drm_flip_task *task)  in drm_flip_work_queue_task() argument
|      50  spin_lock_irqsave(&work->lock, flags);  in drm_flip_work_queue_task()
|      51  list_add_tail(&task->node, &work->queued);  in drm_flip_work_queue_task()
|      52  spin_unlock_irqrestore(&work->lock, flags);  in drm_flip_work_queue_task()
|      63  void drm_flip_work_queue(struct drm_flip_work *work, void *val)  in drm_flip_work_queue() argument
|      70  drm_flip_work_queue_task(work, task);  in drm_flip_work_queue()
|      72  DRM_ERROR("%s could not allocate task!\n", work->name);  in drm_flip_work_queue()
|      73  work->func(work, val);  in drm_flip_work_queue()
|      88  void drm_flip_work_commit(struct drm_flip_work *work,  in drm_flip_work_commit() argument
|      93  spin_lock_irqsave(&work->lock, flags);  in drm_flip_work_commit()
|      [all …]
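The drm_flip_work matches above all revolve around one pattern: tasks are appended to a list under spin_lock_irqsave(), so queueing is safe from atomic (even interrupt) context, and the consumer later drains the list under the same lock. A minimal sketch of that pattern, with hypothetical my_work/my_task types standing in for the drm structures:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct my_task {
        struct list_head node;
        void *data;
    };

    struct my_work {
        spinlock_t lock;            /* protects @queued */
        struct list_head queued;    /* list of struct my_task */
    };

    /* Safe from any context: only a spinlock with IRQs disabled is taken. */
    static void my_work_queue_task(struct my_work *work, struct my_task *task)
    {
        unsigned long flags;

        spin_lock_irqsave(&work->lock, flags);
        list_add_tail(&task->node, &work->queued);
        spin_unlock_irqrestore(&work->lock, flags);
    }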
|
| D | drm_vblank_work.c |
|      48  struct drm_vblank_work *work, *next;  in drm_handle_vblank_works() local
|      54  list_for_each_entry_safe(work, next, &vblank->pending_work, node) {  in drm_handle_vblank_works()
|      55  if (!drm_vblank_passed(count, work->count))  in drm_handle_vblank_works()
|      58  list_del_init(&work->node);  in drm_handle_vblank_works()
|      60  kthread_queue_work(vblank->worker, &work->base);  in drm_handle_vblank_works()
|      72  struct drm_vblank_work *work, *next;  in drm_vblank_cancel_pending_works() local
|      79  list_for_each_entry_safe(work, next, &vblank->pending_work, node) {  in drm_vblank_cancel_pending_works()
|      80  list_del_init(&work->node);  in drm_vblank_cancel_pending_works()
|     109  int drm_vblank_work_schedule(struct drm_vblank_work *work,  in drm_vblank_work_schedule() argument
|     112  struct drm_vblank_crtc *vblank = work->vblank;  in drm_vblank_work_schedule()
|     [all …]
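Unlike drm_flip_work, drm_vblank_work hands its items to a per-CRTC kthread_worker via kthread_queue_work() rather than to the shared system workqueue, which gives the driver a dedicated thread it can, for instance, run at elevated priority. A rough sketch of the generic kthread_worker API these matches rely on, with hypothetical names:

    #include <linux/err.h>
    #include <linux/kthread.h>

    static void my_vblank_fn(struct kthread_work *work)
    {
        /* runs in the dedicated worker thread */
    }

    static int my_worker_demo(void)
    {
        struct kthread_worker *worker;
        struct kthread_work work;

        worker = kthread_create_worker(0, "my-vblank-worker");
        if (IS_ERR(worker))
            return PTR_ERR(worker);

        kthread_init_work(&work, my_vblank_fn);
        kthread_queue_work(worker, &work);  /* hand the item to the thread */

        kthread_flush_worker(worker);       /* wait for it to finish */
        kthread_destroy_worker(worker);
        return 0;
    }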
|
| /drivers/staging/octeon/ |
| D | ethernet-rx.c |
|      63  static inline int cvm_oct_check_rcv_error(struct cvmx_wqe *work)  in cvm_oct_check_rcv_error() argument
|      68  port = work->word0.pip.cn68xx.pknd;  in cvm_oct_check_rcv_error()
|      70  port = work->word1.cn38xx.ipprt;  in cvm_oct_check_rcv_error()
|      72  if ((work->word2.snoip.err_code == 10) && (work->word1.len <= 64))  in cvm_oct_check_rcv_error()
|      81  if (work->word2.snoip.err_code == 5 ||  in cvm_oct_check_rcv_error()
|      82  work->word2.snoip.err_code == 7) {  in cvm_oct_check_rcv_error()
|      99  cvmx_phys_to_ptr(work->packet_ptr.s.addr);  in cvm_oct_check_rcv_error()
|     102  while (i < work->word1.len - 1) {  in cvm_oct_check_rcv_error()
|     111  work->packet_ptr.s.addr += i + 1;  in cvm_oct_check_rcv_error()
|     112  work->word1.len -= i + 5;  in cvm_oct_check_rcv_error()
|     [all …]
|
| D | ethernet-tx.c |
|     514  struct cvmx_wqe *work = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL);  in cvm_oct_xmit_pow() local
|     516  if (unlikely(!work)) {  in cvm_oct_xmit_pow()
|     529  cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1);  in cvm_oct_xmit_pow()
|     559  work->word0.pip.cn38xx.hw_chksum = skb->csum;  in cvm_oct_xmit_pow()
|     560  work->word1.len = skb->len;  in cvm_oct_xmit_pow()
|     561  cvmx_wqe_set_port(work, priv->port);  in cvm_oct_xmit_pow()
|     562  cvmx_wqe_set_qos(work, priv->port & 0x7);  in cvm_oct_xmit_pow()
|     563  cvmx_wqe_set_grp(work, pow_send_group);  in cvm_oct_xmit_pow()
|     564  work->word1.tag_type = CVMX_HELPER_INPUT_TAG_TYPE;  in cvm_oct_xmit_pow()
|     565  work->word1.tag = pow_send_group;  /* FIXME */  in cvm_oct_xmit_pow()
|     [all …]
|
| /drivers/infiniband/core/ |
| D | cm.c |
|      93  struct cm_work *work);
|     185  struct delayed_work work;  member
|     196  struct cm_work work;  member
|     268  static void cm_work_handler(struct work_struct *work);
|     694  __be32 remote_id = timewait_info->work.remote_id;  in cm_insert_remote_id()
|     700  if (be32_lt(remote_id, cur_timewait_info->work.remote_id))  in cm_insert_remote_id()
|     702  else if (be32_gt(remote_id, cur_timewait_info->work.remote_id))  in cm_insert_remote_id()
|     728  if (be32_lt(remote_id, timewait_info->work.remote_id))  in cm_find_remote_id()
|     730  else if (be32_gt(remote_id, timewait_info->work.remote_id))  in cm_find_remote_id()
|     737  res = cm_acquire_id(timewait_info->work.local_id,  in cm_find_remote_id()
|     [all …]
|
| D | roce_gid_mgmt.c |
|      53  struct work_struct work;  member
|      68  struct work_struct work;  member
|     643  struct netdev_event_work *work =  in netdevice_event_work_handler() local
|     644  container_of(_work, struct netdev_event_work, work);  in netdevice_event_work_handler()
|     647  for (i = 0; i < ARRAY_SIZE(work->cmds) && work->cmds[i].cb; i++) {  in netdevice_event_work_handler()
|     648  ib_enum_all_roce_netdevs(work->cmds[i].filter,  in netdevice_event_work_handler()
|     649  work->cmds[i].filter_ndev,  in netdevice_event_work_handler()
|     650  work->cmds[i].cb,  in netdevice_event_work_handler()
|     651  work->cmds[i].ndev);  in netdevice_event_work_handler()
|     652  dev_put(work->cmds[i].ndev);  in netdevice_event_work_handler()
|     [all …]
|
| D | iwcm.c |
|      96  struct work_struct work;  member
|     142  struct iwcm_work *work;  in get_work() local
|     146  work = list_first_entry(&cm_id_priv->work_free_list, struct iwcm_work,  in get_work()
|     148  list_del_init(&work->free_list);  in get_work()
|     149  return work;  in get_work()
|     152  static void put_work(struct iwcm_work *work)  in put_work() argument
|     154  list_add(&work->free_list, &work->cm_id->work_free_list);  in put_work()
|     169  struct iwcm_work *work;  in alloc_work_entries() local
|     173  work = kmalloc(sizeof(struct iwcm_work), GFP_KERNEL);  in alloc_work_entries()
|     174  if (!work) {  in alloc_work_entries()
|     [all …]
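iwcm.c avoids allocating in the event path: alloc_work_entries() preallocates a fixed pool of work items per connection and get_work()/put_work() recycle them through a free list, as the matches above show. A simplified sketch of that free-list discipline, with hypothetical types; the real driver serializes access to the list with its own lock, omitted here:

    #include <linux/errno.h>
    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/workqueue.h>

    struct my_event_work {
        struct work_struct work;
        struct list_head free_list;
    };

    /* Pop one preallocated entry, or NULL if the pool is exhausted. */
    static struct my_event_work *get_work(struct list_head *pool)
    {
        struct my_event_work *work;

        if (list_empty(pool))
            return NULL;
        work = list_first_entry(pool, struct my_event_work, free_list);
        list_del_init(&work->free_list);
        return work;
    }

    static void put_work(struct my_event_work *work, struct list_head *pool)
    {
        list_add(&work->free_list, pool);
    }

    static int alloc_work_entries(struct list_head *pool, int count)
    {
        while (count--) {
            struct my_event_work *work = kmalloc(sizeof(*work), GFP_KERNEL);

            if (!work)
                return -ENOMEM;
            put_work(work, pool);
        }
        return 0;
    }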
|
| /drivers/accessibility/speakup/ |
| D | selection.c |
|      20  struct work_struct work;  member
|      25  static void __speakup_set_selection(struct work_struct *work)  in __speakup_set_selection() argument
|      28  container_of(work, struct speakup_selection_work, work);  in __speakup_set_selection()
|      58  .work = __WORK_INITIALIZER(speakup_sel_work.work,
|      87  schedule_work_on(WORK_CPU_UNBOUND, &speakup_sel_work.work);  in speakup_set_selection()
|      96  cancel_work_sync(&speakup_sel_work.work);  in speakup_cancel_selection()
|     106  static void __speakup_paste_selection(struct work_struct *work)  in __speakup_paste_selection() argument
|     109  container_of(work, struct speakup_selection_work, work);  in __speakup_paste_selection()
|     117  .work = __WORK_INITIALIZER(speakup_paste_work.work,
|     129  schedule_work_on(WORK_CPU_UNBOUND, &speakup_paste_work.work);  in speakup_paste_selection()
|     [all …]
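The speakup matches show a statically defined work item: __WORK_INITIALIZER() initializes the work_struct inside a static struct at build time, so no runtime INIT_WORK() is needed, and schedule_work_on(WORK_CPU_UNBOUND, ...) queues it without pinning a CPU. A minimal sketch with hypothetical names:

    #include <linux/workqueue.h>

    struct my_selection_work {
        struct work_struct work;
        int payload;
    };

    static void my_selection_fn(struct work_struct *work)
    {
        struct my_selection_work *sel =
            container_of(work, struct my_selection_work, work);

        (void)sel->payload;  /* ... act on the payload ... */
    }

    /* Static definition: no INIT_WORK() call needed at runtime. */
    static struct my_selection_work my_sel_work = {
        .work = __WORK_INITIALIZER(my_sel_work.work, my_selection_fn),
    };

    static void my_trigger(void)
    {
        /* WORK_CPU_UNBOUND: let the workqueue pick a CPU. */
        schedule_work_on(WORK_CPU_UNBOUND, &my_sel_work.work);
    }

    static void my_teardown(void)
    {
        cancel_work_sync(&my_sel_work.work);
    }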
|
| /drivers/net/wireless/st/cw1200/ |
| D | sta.h |
|      59  void cw1200_event_handler(struct work_struct *work);
|      60  void cw1200_bss_loss_work(struct work_struct *work);
|      61  void cw1200_bss_params_work(struct work_struct *work);
|      62  void cw1200_keep_alive_work(struct work_struct *work);
|      63  void cw1200_tx_failure_work(struct work_struct *work);
|      79  void cw1200_join_timeout(struct work_struct *work);
|      80  void cw1200_unjoin_work(struct work_struct *work);
|      81  void cw1200_join_complete_work(struct work_struct *work);
|      82  void cw1200_wep_key_work(struct work_struct *work);
|      85  void cw1200_update_filtering_work(struct work_struct *work);
|      [all …]
|
| /drivers/net/ethernet/mellanox/mlx5/core/lag/ |
| D | mpesw.c |
|     130  static void mlx5_mpesw_work(struct work_struct *work)  in mlx5_mpesw_work() argument
|     132  struct mlx5_mpesw_work_st *mpesww = container_of(work, struct mlx5_mpesw_work_st, work);  in mlx5_mpesw_work()
|     161  struct mlx5_mpesw_work_st *work;  in mlx5_lag_mpesw_queue_work() local
|     167  work = kzalloc(sizeof(*work), GFP_KERNEL);  in mlx5_lag_mpesw_queue_work()
|     168  if (!work)  in mlx5_lag_mpesw_queue_work()
|     171  INIT_WORK(&work->work, mlx5_mpesw_work);  in mlx5_lag_mpesw_queue_work()
|     172  init_completion(&work->comp);  in mlx5_lag_mpesw_queue_work()
|     173  work->op = op;  in mlx5_lag_mpesw_queue_work()
|     174  work->lag = ldev;  in mlx5_lag_mpesw_queue_work()
|     176  if (!queue_work(ldev->wq, &work->work)) {  in mlx5_lag_mpesw_queue_work()
|     [all …]
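mpesw.c pairs each work item with a completion so the submitter can queue the operation onto the driver's workqueue and then wait synchronously for its result. A sketch of that queue-and-wait pattern under the same assumptions (hypothetical my_op_work type):

    #include <linux/completion.h>
    #include <linux/errno.h>
    #include <linux/slab.h>
    #include <linux/workqueue.h>

    struct my_op_work {
        struct work_struct work;
        struct completion comp;
        int op;
        int result;
    };

    static void my_op_fn(struct work_struct *work)
    {
        struct my_op_work *w = container_of(work, struct my_op_work, work);

        w->result = 0;          /* ... perform w->op ... */
        complete(&w->comp);     /* wake the submitter */
    }

    static int my_queue_op(struct workqueue_struct *wq, int op)
    {
        struct my_op_work *w;
        int err;

        w = kzalloc(sizeof(*w), GFP_KERNEL);
        if (!w)
            return -ENOMEM;

        INIT_WORK(&w->work, my_op_fn);
        init_completion(&w->comp);
        w->op = op;

        if (!queue_work(wq, &w->work)) {    /* already queued: bail out */
            err = -EINVAL;
            goto out;
        }

        wait_for_completion(&w->comp);
        err = w->result;
    out:
        kfree(w);
        return err;
    }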
|
| /drivers/net/ethernet/mellanox/mlx5/core/sf/ |
| D | vhca_event.c |
|      19  struct work_struct work;  member
|     103  struct mlx5_vhca_event_work *work = container_of(_work, struct mlx5_vhca_event_work, work);  in mlx5_vhca_state_work_handler() local
|     104  struct mlx5_vhca_state_notifier *notifier = work->notifier;  in mlx5_vhca_state_work_handler()
|     107  mlx5_vhca_event_notify(dev, &work->event);  in mlx5_vhca_state_work_handler()
|     108  kfree(work);  in mlx5_vhca_state_work_handler()
|     111  void mlx5_vhca_events_work_enqueue(struct mlx5_core_dev *dev, int idx, struct work_struct *work)  in mlx5_vhca_events_work_enqueue() argument
|     113  queue_work(dev->priv.vhca_events->handler[idx].wq, work);  in mlx5_vhca_events_work_enqueue()
|     121  struct mlx5_vhca_event_work *work;  in mlx5_vhca_state_change_notifier() local
|     125  work = kzalloc(sizeof(*work), GFP_ATOMIC);  in mlx5_vhca_state_change_notifier()
|     126  if (!work)  in mlx5_vhca_state_change_notifier()
|     [all …]
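The vhca_event matches illustrate work allocated from a notifier callback: because the notifier may run in atomic context, the event payload is copied into a GFP_ATOMIC-allocated work item, and the one-shot handler frees it when done. A minimal sketch, with a hypothetical my_event payload:

    #include <linux/errno.h>
    #include <linux/slab.h>
    #include <linux/types.h>
    #include <linux/workqueue.h>

    struct my_event {
        u32 id;
    };

    struct my_event_work {
        struct work_struct work;
        struct my_event event;  /* copied: the original may vanish */
    };

    static void my_event_handler(struct work_struct *_work)
    {
        struct my_event_work *work =
            container_of(_work, struct my_event_work, work);

        /* ... process work->event in sleepable context ... */
        kfree(work);            /* one-shot item frees itself */
    }

    /* May be called from atomic context, hence GFP_ATOMIC. */
    static int my_event_notify(const struct my_event *event)
    {
        struct my_event_work *work;

        work = kzalloc(sizeof(*work), GFP_ATOMIC);
        if (!work)
            return -ENOMEM;

        INIT_WORK(&work->work, my_event_handler);
        work->event = *event;
        schedule_work(&work->work);
        return 0;
    }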
|
| /drivers/net/ethernet/mellanox/mlx5/core/en/rep/ |
| D | bridge.c |
|      15  struct work_struct work;  member
|     364  static void mlx5_esw_bridge_switchdev_fdb_event_work(struct work_struct *work)  in mlx5_esw_bridge_switchdev_fdb_event_work() argument
|     367  container_of(work, struct mlx5_bridge_switchdev_fdb_work, work);  in mlx5_esw_bridge_switchdev_fdb_event_work()
|     398  struct mlx5_bridge_switchdev_fdb_work *work;  in mlx5_esw_bridge_init_switchdev_fdb_work() local
|     401  work = kzalloc(sizeof(*work), GFP_ATOMIC);  in mlx5_esw_bridge_init_switchdev_fdb_work()
|     402  if (!work)  in mlx5_esw_bridge_init_switchdev_fdb_work()
|     405  INIT_WORK(&work->work, mlx5_esw_bridge_switchdev_fdb_event_work);  in mlx5_esw_bridge_init_switchdev_fdb_work()
|     406  memcpy(&work->fdb_info, fdb_info, sizeof(work->fdb_info));  in mlx5_esw_bridge_init_switchdev_fdb_work()
|     410  kfree(work);  in mlx5_esw_bridge_init_switchdev_fdb_work()
|     414  work->fdb_info.addr = addr;  in mlx5_esw_bridge_init_switchdev_fdb_work()
|     [all …]
|
| /drivers/gpu/drm/msm/ |
| D | msm_io_utils.c |
|     117  struct msm_hrtimer_work *work = container_of(t,  in msm_hrtimer_worktimer() local
|     120  kthread_queue_work(work->worker, &work->work);  in msm_hrtimer_worktimer()
|     125  void msm_hrtimer_queue_work(struct msm_hrtimer_work *work,  in msm_hrtimer_queue_work() argument
|     129  hrtimer_start(&work->timer, wakeup_time, mode);  in msm_hrtimer_queue_work()
|     132  void msm_hrtimer_work_init(struct msm_hrtimer_work *work,  in msm_hrtimer_work_init() argument
|     138  hrtimer_init(&work->timer, clock_id, mode);  in msm_hrtimer_work_init()
|     139  work->timer.function = msm_hrtimer_worktimer;  in msm_hrtimer_work_init()
|     140  work->worker = worker;  in msm_hrtimer_work_init()
|     141  kthread_init_work(&work->work, fn);  in msm_hrtimer_work_init()
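msm_hrtimer_work pairs an hrtimer with a kthread_work: the timer callback fires in timer (hard-IRQ) context where sleeping is forbidden, so it only queues the work on a kthread_worker. A sketch of the same pairing under those assumptions, with hypothetical names:

    #include <linux/hrtimer.h>
    #include <linux/kthread.h>

    struct my_hrtimer_work {
        struct hrtimer timer;
        struct kthread_work work;
        struct kthread_worker *worker;
    };

    /* Timer context: cannot sleep, so just hand off to the worker thread. */
    static enum hrtimer_restart my_hrtimer_fn(struct hrtimer *t)
    {
        struct my_hrtimer_work *work =
            container_of(t, struct my_hrtimer_work, timer);

        kthread_queue_work(work->worker, &work->work);
        return HRTIMER_NORESTART;
    }

    static void my_hrtimer_work_init(struct my_hrtimer_work *work,
                                     struct kthread_worker *worker,
                                     kthread_work_func_t fn)
    {
        hrtimer_init(&work->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
        work->timer.function = my_hrtimer_fn;
        work->worker = worker;
        kthread_init_work(&work->work, fn);
    }

    static void my_hrtimer_queue_work(struct my_hrtimer_work *work,
                                      ktime_t wakeup_time)
    {
        hrtimer_start(&work->timer, wakeup_time, HRTIMER_MODE_ABS);
    }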
|
| /drivers/md/ |
| D | dm-cache-background-tracker.c |
|      86  cmp = cmp_oblock(w->work.oblock, nw->work.oblock);  in __insert_pending()
|     114  cmp = cmp_oblock(w->work.oblock, oblock);  in __find_pending()
|     174  struct policy_work *work,  in btracker_queue() argument
|     186  memcpy(&w->work, work, sizeof(*work));  in btracker_queue()
|     198  *pwork = &w->work;  in btracker_queue()
|     202  update_stats(b, &w->work, 1);  in btracker_queue()
|     211  int btracker_issue(struct background_tracker *b, struct policy_work **work)  in btracker_issue() argument
|     220  *work = &w->work;  in btracker_issue()
|     229  struct bt_work *w = container_of(op, struct bt_work, work);  in btracker_complete()
|     231  update_stats(b, &w->work, -1);  in btracker_complete()
|
| /drivers/infiniband/ulp/ipoib/ |
| D | ipoib_vlan.c |
|     215  struct work_struct work;  member
|     229  static void ipoib_vlan_delete_task(struct work_struct *work)  in ipoib_vlan_delete_task() argument
|     232  container_of(work, struct ipoib_vlan_delete_work, work);  in ipoib_vlan_delete_task()
|     273  struct ipoib_vlan_delete_work *work;  in ipoib_vlan_delete() local
|     275  work = kmalloc(sizeof(*work), GFP_KERNEL);  in ipoib_vlan_delete()
|     276  if (!work) {  in ipoib_vlan_delete()
|     284  work->dev = priv->dev;  in ipoib_vlan_delete()
|     285  INIT_WORK(&work->work, ipoib_vlan_delete_task);  in ipoib_vlan_delete()
|     286  queue_work(ipoib_workqueue, &work->work);  in ipoib_vlan_delete()
|
| /drivers/scsi/libsas/ |
| D | sas_phy.c |
|      17  static void sas_phye_loss_of_signal(struct work_struct *work)  in sas_phye_loss_of_signal() argument
|      19  struct asd_sas_event *ev = to_asd_sas_event(work);  in sas_phye_loss_of_signal()
|      26  static void sas_phye_oob_done(struct work_struct *work)  in sas_phye_oob_done() argument
|      28  struct asd_sas_event *ev = to_asd_sas_event(work);  in sas_phye_oob_done()
|      34  static void sas_phye_oob_error(struct work_struct *work)  in sas_phye_oob_error() argument
|      36  struct asd_sas_event *ev = to_asd_sas_event(work);  in sas_phye_oob_error()
|      63  static void sas_phye_spinup_hold(struct work_struct *work)  in sas_phye_spinup_hold() argument
|      65  struct asd_sas_event *ev = to_asd_sas_event(work);  in sas_phye_spinup_hold()
|      75  static void sas_phye_resume_timeout(struct work_struct *work)  in sas_phye_resume_timeout() argument
|      77  struct asd_sas_event *ev = to_asd_sas_event(work);  in sas_phye_resume_timeout()
|      [all …]
|
| D | sas_event.c |
|      25  return queue_work(ha->event_q, &sw->work);  in sas_queue_work()
|      28  static bool sas_queue_event(int event, struct sas_work *work,  in sas_queue_event() argument
|      35  rc = sas_queue_work(ha, work);  in sas_queue_event()
|      51  sas_free_event(to_asd_sas_event(&sw->work));  in sas_queue_deferred_work()
|     124  static void sas_port_event_worker(struct work_struct *work)  in sas_port_event_worker() argument
|     126  struct asd_sas_event *ev = to_asd_sas_event(work);  in sas_port_event_worker()
|     130  sas_port_event_fns[ev->event](work);  in sas_port_event_worker()
|     135  static void sas_phy_event_worker(struct work_struct *work)  in sas_phy_event_worker() argument
|     137  struct asd_sas_event *ev = to_asd_sas_event(work);  in sas_phy_event_worker()
|     141  sas_phy_event_fns[ev->event](work);  in sas_phy_event_worker()
|     [all …]
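Taken together, sas_phy.c and sas_event.c show a dispatch-table pattern: a generic worker recovers the containing event via a container_of() helper (to_asd_sas_event()) and indexes an array of handlers by event number. A sketch of that shape with hypothetical names, assuming the event number is always validated before dispatch:

    #include <linux/workqueue.h>

    enum my_phy_event {
        MY_PHYE_LOSS_OF_SIGNAL,
        MY_PHYE_OOB_DONE,
        MY_PHYE_NUM_EVENTS,
    };

    struct my_sas_event {
        struct work_struct work;
        enum my_phy_event event;
    };

    #define to_my_sas_event(w) container_of((w), struct my_sas_event, work)

    static void my_phye_loss_of_signal(struct work_struct *work)
    {
        struct my_sas_event *ev = to_my_sas_event(work);

        (void)ev;   /* ... handle loss of signal ... */
    }

    static void my_phye_oob_done(struct work_struct *work)
    {
        struct my_sas_event *ev = to_my_sas_event(work);

        (void)ev;   /* ... handle OOB done ... */
    }

    /* One handler per event number; the worker just indexes the table. */
    static void (*const my_phy_event_fns[MY_PHYE_NUM_EVENTS])(struct work_struct *) = {
        [MY_PHYE_LOSS_OF_SIGNAL] = my_phye_loss_of_signal,
        [MY_PHYE_OOB_DONE]       = my_phye_oob_done,
    };

    static void my_phy_event_worker(struct work_struct *work)
    {
        struct my_sas_event *ev = to_my_sas_event(work);

        my_phy_event_fns[ev->event](work);
    }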
|
| /drivers/greybus/ |
| D | svc_watchdog.c |
|      16  struct delayed_work work;  member
|      44  static void greybus_reset(struct work_struct *work)  in greybus_reset() argument
|      63  static void do_work(struct work_struct *work)  in do_work() argument
|      69  watchdog = container_of(work, struct gb_svc_watchdog, work.work);  in do_work()
|     104  schedule_delayed_work(&watchdog->work, SVC_WATCHDOG_PERIOD);  in do_work()
|     121  INIT_DELAYED_WORK(&watchdog->work, do_work);  in gb_svc_watchdog_create()
|     179  schedule_delayed_work(&watchdog->work, SVC_WATCHDOG_PERIOD);  in gb_svc_watchdog_enable()
|     195  cancel_delayed_work_sync(&watchdog->work);  in gb_svc_watchdog_disable()
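The greybus watchdog is the classic self-rescheduling delayed work: the handler re-arms itself each period until cancel_delayed_work_sync() stops it. Note the container_of(work, ..., work.work) in do_work() above: a delayed_work embeds a work_struct, hence the doubled member path. A minimal sketch of the pattern:

    #include <linux/jiffies.h>
    #include <linux/workqueue.h>

    #define MY_WATCHDOG_PERIOD  (2 * HZ)

    struct my_watchdog {
        struct delayed_work work;
    };

    static void my_watchdog_fn(struct work_struct *work)
    {
        /* delayed_work embeds work_struct, hence the work.work member. */
        struct my_watchdog *wd =
            container_of(work, struct my_watchdog, work.work);

        /* ... check device health ... */

        /* Re-arm: the watchdog keeps itself alive until cancelled. */
        schedule_delayed_work(&wd->work, MY_WATCHDOG_PERIOD);
    }

    static void my_watchdog_enable(struct my_watchdog *wd)
    {
        INIT_DELAYED_WORK(&wd->work, my_watchdog_fn);
        schedule_delayed_work(&wd->work, MY_WATCHDOG_PERIOD);
    }

    static void my_watchdog_disable(struct my_watchdog *wd)
    {
        /* Sync cancel also waits out a handler that is mid-re-arm. */
        cancel_delayed_work_sync(&wd->work);
    }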
|
| /drivers/extcon/ |
| D | extcon-gpio.c |
|      37  struct delayed_work work;  member
|      45  static void gpio_extcon_work(struct work_struct *work)  in gpio_extcon_work() argument
|      49  container_of(to_delayed_work(work), struct gpio_extcon_data,  in gpio_extcon_work()
|      50  work);  in gpio_extcon_work()
|      60  queue_delayed_work(system_power_efficient_wq, &data->work,  in gpio_irq_handler()
|     116  ret = devm_delayed_work_autocancel(dev, &data->work, gpio_extcon_work);  in gpio_extcon_probe()
|     132  gpio_extcon_work(&data->work.work);  in gpio_extcon_probe()
|     145  &data->work, data->debounce_jiffies);  in gpio_extcon_resume()
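extcon-gpio debounces its interrupt by deferring the GPIO read into delayed work on system_power_efficient_wq, and devm_delayed_work_autocancel() ties cancellation to device teardown so remove paths need no explicit cleanup. A sketch of that IRQ-plus-debounce shape, with hypothetical names:

    #include <linux/devm-helpers.h>
    #include <linux/interrupt.h>
    #include <linux/workqueue.h>

    struct my_gpio_data {
        struct delayed_work work;
        unsigned long debounce_jiffies;
    };

    static void my_debounce_fn(struct work_struct *work)
    {
        struct my_gpio_data *data =
            container_of(to_delayed_work(work), struct my_gpio_data, work);

        (void)data;  /* ... read the now-settled GPIO state ... */
    }

    static irqreturn_t my_gpio_irq(int irq, void *dev_id)
    {
        struct my_gpio_data *data = dev_id;

        /* Defer: the line may still be bouncing. */
        queue_delayed_work(system_power_efficient_wq, &data->work,
                           data->debounce_jiffies);
        return IRQ_HANDLED;
    }

    static int my_probe(struct device *dev, struct my_gpio_data *data)
    {
        /* Work is cancelled automatically when the device goes away. */
        return devm_delayed_work_autocancel(dev, &data->work, my_debounce_fn);
    }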
|
| /drivers/gpu/drm/amd/amdgpu/ |
| D | amdgpu_display.c |
|      66  void amdgpu_display_hotplug_work_func(struct work_struct *work)  in amdgpu_display_hotplug_work_func() argument
|      68  struct amdgpu_device *adev = container_of(work, struct amdgpu_device,  in amdgpu_display_hotplug_work_func()
|      69  hotplug_work.work);  in amdgpu_display_hotplug_work_func()
|      93  struct amdgpu_flip_work *work =  in amdgpu_display_flip_callback() local
|      97  schedule_work(&work->flip_work.work);  in amdgpu_display_flip_callback()
|     100  static bool amdgpu_display_flip_handle_fence(struct amdgpu_flip_work *work,  in amdgpu_display_flip_handle_fence() argument
|     110  if (!dma_fence_add_callback(fence, &work->cb,  in amdgpu_display_flip_handle_fence()
|     121  container_of(__work, struct delayed_work, work);  in amdgpu_display_flip_work_func()
|     122  struct amdgpu_flip_work *work =  in amdgpu_display_flip_work_func() local
|     124  struct amdgpu_device *adev = work->adev;  in amdgpu_display_flip_work_func()
|     [all …]
|
| /drivers/gpu/drm/amd/display/amdgpu_dm/ |
| D | amdgpu_dm_hdcp.c |
|     152  static void link_lock(struct hdcp_workqueue *work, bool lock)  in link_lock() argument
|     156  for (i = 0; i < work->max_link; i++) {  in link_lock()
|     158  mutex_lock(&work[i].mutex);  in link_lock()
|     160  mutex_unlock(&work[i].mutex);  in link_lock()
|     278  static void event_callback(struct work_struct *work)  in event_callback() argument
|     282  hdcp_work = container_of(to_delayed_work(work), struct hdcp_workqueue,  in event_callback()
|     295  static void event_property_update(struct work_struct *work)  in event_property_update() argument
|     297  struct hdcp_workqueue *hdcp_work = container_of(work, struct hdcp_workqueue,  in event_property_update()
|     365  static void event_property_validate(struct work_struct *work)  in event_property_validate() argument
|     368  container_of(to_delayed_work(work), struct hdcp_workqueue, property_validate_dwork);  in event_property_validate()
|     [all …]
|
| /drivers/input/misc/ |
| D | cs40l50-vibra.c |
|      91  struct work_struct work;  member
|     264  static void cs40l50_add_worker(struct work_struct *work)  in cs40l50_add_worker() argument
|     266  struct cs40l50_work *work_data = container_of(work, struct cs40l50_work, work);  in cs40l50_add_worker()
|     339  INIT_WORK_ONSTACK(&work_data.work, cs40l50_add_worker);  in cs40l50_add()
|     342  queue_work(vib->vib_wq, &work_data.work);  in cs40l50_add()
|     343  flush_work(&work_data.work);  in cs40l50_add()
|     344  destroy_work_on_stack(&work_data.work);  in cs40l50_add()
|     351  static void cs40l50_start_worker(struct work_struct *work)  in cs40l50_start_worker() argument
|     353  struct cs40l50_work *work_data = container_of(work, struct cs40l50_work, work);  in cs40l50_start_worker()
|     377  static void cs40l50_stop_worker(struct work_struct *work)  in cs40l50_stop_worker() argument
|     [all …]
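cs40l50_add() above shows on-stack work: the item lives in the caller's stack frame, so it must be initialized with INIT_WORK_ONSTACK(), flushed before the frame unwinds, and released with destroy_work_on_stack() for debugobjects bookkeeping. A minimal sketch of the same lifecycle:

    #include <linux/workqueue.h>

    struct my_onstack_work {
        struct work_struct work;
        int arg;
        int result;
    };

    static void my_onstack_fn(struct work_struct *work)
    {
        struct my_onstack_work *w =
            container_of(work, struct my_onstack_work, work);

        w->result = w->arg * 2;     /* stand-in for the real job */
    }

    static int my_run_on_wq(struct workqueue_struct *wq, int arg)
    {
        struct my_onstack_work w;   /* lives on this stack frame */

        INIT_WORK_ONSTACK(&w.work, my_onstack_fn);
        w.arg = arg;

        queue_work(wq, &w.work);
        flush_work(&w.work);            /* must finish before we return */
        destroy_work_on_stack(&w.work); /* debugobjects bookkeeping */

        return w.result;
    }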
|
| D | wm831x-on.c |
|      32  struct delayed_work work;  member
|      40  static void wm831x_poll_on(struct work_struct *work)  in wm831x_poll_on() argument
|      42  struct wm831x_on *wm831x_on = container_of(work, struct wm831x_on,  in wm831x_poll_on()
|      43  work.work);  in wm831x_poll_on()
|      59  schedule_delayed_work(&wm831x_on->work, 100);  in wm831x_poll_on()
|      66  schedule_delayed_work(&wm831x_on->work, 0);  in wm831x_on_irq()
|      86  INIT_DELAYED_WORK(&wm831x_on->work, wm831x_poll_on);  in wm831x_on_probe()
|     132  cancel_delayed_work_sync(&wm831x_on->work);  in wm831x_on_remove()
|
| D | da9052_onkey.c |
|      21  struct delayed_work work;  member
|      49  schedule_delayed_work(&onkey->work,  in da9052_onkey_query()
|      54  static void da9052_onkey_work(struct work_struct *work)  in da9052_onkey_work() argument
|      56  struct da9052_onkey *onkey = container_of(work, struct da9052_onkey,  in da9052_onkey_work()
|      57  work.work);  in da9052_onkey_work()
|      93  INIT_DELAYED_WORK(&onkey->work, da9052_onkey_work);  in da9052_onkey_probe()
|     122  cancel_delayed_work_sync(&onkey->work);  in da9052_onkey_probe()
|     135  cancel_delayed_work_sync(&onkey->work);  in da9052_onkey_remove()
|
| /drivers/misc/cxl/ |
| D | file.c |
|     148  struct cxl_ioctl_start_work work;  in afu_ioctl_start_work() local
|     156  if (copy_from_user(&work, uwork, sizeof(work)))  in afu_ioctl_start_work()
|     169  if (work.reserved1 || work.reserved2 || work.reserved3 ||  in afu_ioctl_start_work()
|     170  work.reserved4 || work.reserved5 ||  in afu_ioctl_start_work()
|     171  (work.flags & ~CXL_START_WORK_ALL)) {  in afu_ioctl_start_work()
|     176  if (!(work.flags & CXL_START_WORK_NUM_IRQS))  in afu_ioctl_start_work()
|     177  work.num_interrupts = ctx->afu->pp_irqs;  in afu_ioctl_start_work()
|     178  else if ((work.num_interrupts < ctx->afu->pp_irqs) ||  in afu_ioctl_start_work()
|     179  (work.num_interrupts > ctx->afu->irqs_max)) {  in afu_ioctl_start_work()
|     184  if ((rc = afu_register_irqs(ctx, work.num_interrupts)))  in afu_ioctl_start_work()
|     [all …]
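Unlike every other match in this listing, the cxl `work` here is not a work_struct at all but a userspace ioctl argument, and afu_ioctl_start_work() shows the usual intake discipline: copy the whole struct in, reject any set reserved field or unknown flag so those bits can gain meaning in later ABI revisions, then fill in defaults. A sketch of that discipline with an illustrative struct layout (the field names and flag values are hypothetical, not the cxl ABI):

    #include <linux/errno.h>
    #include <linux/kernel.h>
    #include <linux/types.h>
    #include <linux/uaccess.h>

    #define MY_START_WORK_NUM_IRQS  0x1ULL
    #define MY_START_WORK_ALL       (MY_START_WORK_NUM_IRQS)

    struct my_start_work {
        __u64 flags;
        __s16 num_interrupts;
        __u64 reserved[4];
    };

    static long my_ioctl_start_work(struct my_start_work __user *uwork,
                                    int default_irqs)
    {
        struct my_start_work work;
        int i;

        if (copy_from_user(&work, uwork, sizeof(work)))
            return -EFAULT;

        /* Reserved fields must be zero so they can gain meaning later. */
        for (i = 0; i < ARRAY_SIZE(work.reserved); i++)
            if (work.reserved[i])
                return -EINVAL;
        if (work.flags & ~MY_START_WORK_ALL)
            return -EINVAL;

        if (!(work.flags & MY_START_WORK_NUM_IRQS))
            work.num_interrupts = default_irqs;

        /* ... validate the requested range and start the context ... */
        return 0;
    }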
|