1 /*
2 * Copyright 2014 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24 #include <core/notify.h>
25 #include <core/event.h>
26
27 static inline void
nvkm_notify_put_locked(struct nvkm_notify * notify)28 nvkm_notify_put_locked(struct nvkm_notify *notify)
29 {
30 if (notify->block++ == 0)
31 nvkm_event_put(notify->event, notify->types, notify->index);
32 }
33
34 void
nvkm_notify_put(struct nvkm_notify * notify)35 nvkm_notify_put(struct nvkm_notify *notify)
36 {
37 struct nvkm_event *event = notify->event;
38 unsigned long flags;
39 if (likely(event) &&
40 test_and_clear_bit(NVKM_NOTIFY_USER, ¬ify->flags)) {
41 spin_lock_irqsave(&event->refs_lock, flags);
42 nvkm_notify_put_locked(notify);
43 spin_unlock_irqrestore(&event->refs_lock, flags);
44 if (test_bit(NVKM_NOTIFY_WORK, ¬ify->flags))
45 flush_work(¬ify->work);
46 }
47 }
48
49 static inline void
nvkm_notify_get_locked(struct nvkm_notify * notify)50 nvkm_notify_get_locked(struct nvkm_notify *notify)
51 {
52 if (--notify->block == 0)
53 nvkm_event_get(notify->event, notify->types, notify->index);
54 }
55
56 void
nvkm_notify_get(struct nvkm_notify * notify)57 nvkm_notify_get(struct nvkm_notify *notify)
58 {
59 struct nvkm_event *event = notify->event;
60 unsigned long flags;
61 if (likely(event) &&
62 !test_and_set_bit(NVKM_NOTIFY_USER, ¬ify->flags)) {
63 spin_lock_irqsave(&event->refs_lock, flags);
64 nvkm_notify_get_locked(notify);
65 spin_unlock_irqrestore(&event->refs_lock, flags);
66 }
67 }
68
/* Deliver a pending notification by running the user's callback, then
 * decide whether to re-arm the notifier.  It is re-armed (get_locked)
 * when the callback returned NVKM_NOTIFY_KEEP, or when the USER bit was
 * already clear.  Note the deliberate short-circuit: the USER bit is
 * only test-and-cleared when ret != NVKM_NOTIFY_KEEP, i.e. a KEEP
 * return leaves the user's reference intact.
 */
static inline void
nvkm_notify_func(struct nvkm_notify *notify)
{
	struct nvkm_event *event = notify->event;
	int ret = notify->func(notify);
	unsigned long flags;
	if ((ret == NVKM_NOTIFY_KEEP) ||
	    !test_and_clear_bit(NVKM_NOTIFY_USER, &notify->flags)) {
		spin_lock_irqsave(&event->refs_lock, flags);
		nvkm_notify_get_locked(notify);
		spin_unlock_irqrestore(&event->refs_lock, flags);
	}
}
82
83 static void
nvkm_notify_work(struct work_struct * work)84 nvkm_notify_work(struct work_struct *work)
85 {
86 struct nvkm_notify *notify = container_of(work, typeof(*notify), work);
87 nvkm_notify_func(notify);
88 }
89
/* Dispatch event data to a notifier.  Caller must hold event->list_lock
 * (asserted below), and the payload size must match what the notifier
 * was constructed with.
 *
 * A notifier that is currently blocked is skipped entirely.  Otherwise
 * it is disarmed (put_locked) before delivery so it cannot fire again
 * until the callback path re-arms it.  Notifiers created with
 * work == true get the payload copied into their own buffer and the
 * callback deferred to a workqueue; others have the callback invoked
 * directly, with notify->data pointing at the caller's buffer only for
 * the duration of the call.
 */
void
nvkm_notify_send(struct nvkm_notify *notify, void *data, u32 size)
{
	struct nvkm_event *event = notify->event;
	unsigned long flags;

	assert_spin_locked(&event->list_lock);
	BUG_ON(size != notify->size);

	spin_lock_irqsave(&event->refs_lock, flags);
	if (notify->block) {
		/* Notifier not armed: drop the event on the floor. */
		spin_unlock_irqrestore(&event->refs_lock, flags);
		return;
	}
	nvkm_notify_put_locked(notify);
	spin_unlock_irqrestore(&event->refs_lock, flags);

	if (test_bit(NVKM_NOTIFY_WORK, &notify->flags)) {
		/* notify->data is the kmalloc'd buffer from init; cast drops
		 * its const qualifier for the copy.
		 */
		memcpy((void *)notify->data, data, size);
		schedule_work(&notify->work);
	} else {
		notify->data = data;
		nvkm_notify_func(notify);
		notify->data = NULL;
	}
}
116
117 void
nvkm_notify_fini(struct nvkm_notify * notify)118 nvkm_notify_fini(struct nvkm_notify *notify)
119 {
120 unsigned long flags;
121 if (notify->event) {
122 nvkm_notify_put(notify);
123 spin_lock_irqsave(¬ify->event->list_lock, flags);
124 list_del(¬ify->head);
125 spin_unlock_irqrestore(¬ify->event->list_lock, flags);
126 kfree((void *)notify->data);
127 notify->event = NULL;
128 }
129 }
130
131 int
nvkm_notify_init(struct nvkm_object * object,struct nvkm_event * event,int (* func)(struct nvkm_notify *),bool work,void * data,u32 size,u32 reply,struct nvkm_notify * notify)132 nvkm_notify_init(struct nvkm_object *object, struct nvkm_event *event,
133 int (*func)(struct nvkm_notify *), bool work,
134 void *data, u32 size, u32 reply,
135 struct nvkm_notify *notify)
136 {
137 unsigned long flags;
138 int ret = -ENODEV;
139 if ((notify->event = event), event->refs) {
140 ret = event->func->ctor(object, data, size, notify);
141 if (ret == 0 && (ret = -EINVAL, notify->size == reply)) {
142 notify->flags = 0;
143 notify->block = 1;
144 notify->func = func;
145 notify->data = NULL;
146 if (ret = 0, work) {
147 INIT_WORK(¬ify->work, nvkm_notify_work);
148 set_bit(NVKM_NOTIFY_WORK, ¬ify->flags);
149 notify->data = kmalloc(reply, GFP_KERNEL);
150 if (!notify->data)
151 ret = -ENOMEM;
152 }
153 }
154 if (ret == 0) {
155 spin_lock_irqsave(&event->list_lock, flags);
156 list_add_tail(¬ify->head, &event->list);
157 spin_unlock_irqrestore(&event->list_lock, flags);
158 }
159 }
160 if (ret)
161 notify->event = NULL;
162 return ret;
163 }
164