/* Copyright libuv project contributors. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */


#include "os390-syscalls.h"
#include <errno.h>
#include <stdlib.h>
#include <search.h>
#include <termios.h>
#include <sys/msg.h>

#define CW_INTRPT 1
#define CW_CONDVAR 32

#pragma linkage(BPX4CTW, OS)
#pragma linkage(BPX1CTW, OS)

static int number_of_epolls;
static QUEUE global_epoll_queue;
static uv_mutex_t global_epoll_lock;
static uv_once_t once = UV_ONCE_INIT;

/* POSIX-style scandir() replacement for z/OS.
 *
 * Collects the entries of |maindir| that |filter| accepts (a NULL filter
 * accepts everything), sorts them with |compar| and stores a heap-allocated
 * array of heap-allocated dirent copies in |*namelist|.
 *
 * Returns the number of entries on success.  Returns -1 with errno set on
 * failure; on allocation failure everything allocated so far is released
 * and errno is ENOMEM.  The caller owns *namelist and each element.
 */
int scandir(const char* maindir, struct dirent*** namelist,
            int (*filter)(const struct dirent*),
            int (*compar)(const struct dirent**,
                          const struct dirent **)) {
  struct dirent** nl;
  struct dirent** nl_copy;
  struct dirent* dirent;
  unsigned count;
  DIR* mdir;

  nl = NULL;
  count = 0;
  mdir = opendir(maindir);
  if (!mdir)
    return -1;

  while (1) {
    dirent = readdir(mdir);
    if (!dirent)
      break;
    if (!filter || filter(dirent)) {
      struct dirent* copy;
      copy = uv__malloc(sizeof(*copy));
      if (!copy)
        goto error;
      memcpy(copy, dirent, sizeof(*copy));

      /* Grow one slot at a time; keep the old array intact on failure so
       * the error path can release everything collected so far. */
      nl_copy = uv__realloc(nl, sizeof(*copy) * (count + 1));
      if (nl_copy == NULL) {
        uv__free(copy);
        goto error;
      }

      nl = nl_copy;
      nl[count++] = copy;
    }
  }

  /* qsort() with a null base pointer or a null comparator is undefined
   * behavior; only sort when there is something to sort and a way to
   * sort it. */
  if (count > 0 && compar != NULL)
    qsort(nl, count, sizeof(struct dirent*),
          (int (*)(const void*, const void*)) compar);

  closedir(mdir);

  *namelist = nl;
  return count;

error:
  while (count > 0) {
    dirent = nl[--count];
    uv__free(dirent);
  }
  uv__free(nl);
  closedir(mdir);
  errno = ENOMEM;
  return -1;
}


/* Round |val| up to the nearest power of two.  A value that already is a
 * power of two maps to itself; 0 maps to 0 (wraps through UINT_MAX). */
static unsigned int next_power_of_two(unsigned int val) {
  unsigned int shift;

  /* Smear the highest set bit of (val - 1) into every lower position,
   * then add one to land on the next power of two. */
  val -= 1;
  for (shift = 1; shift <= 16; shift *= 2)
    val |= val >> shift;
  return val + 1;
}


/* Ensure the pollfd array of |lst| can hold at least |len| entries.
 *
 * Grows the array to the next power of two >= |len|, initializing every
 * new slot to fd = -1 (ignored by poll()).  The last slot is always
 * reserved for the message queue descriptor, so that entry is lifted out
 * before the realloc and written back into the new last slot afterwards.
 * No-op when the array is already large enough; aborts the process if
 * the allocation fails.
 */
static void maybe_resize(uv__os390_epoll* lst, unsigned int len) {
  unsigned int newsize;
  unsigned int i;
  struct pollfd* newlst;
  struct pollfd event;

  if (len <= lst->size)
    return;

  if (lst->size == 0)
    event.fd = -1;
  else {
    /* Extract the message queue at the end. */
    event = lst->items[lst->size - 1];
    lst->items[lst->size - 1].fd = -1;
  }

  newsize = next_power_of_two(len);
  newlst = uv__realloc(lst->items, newsize * sizeof(lst->items[0]));

  if (newlst == NULL)
    abort();
  for (i = lst->size; i < newsize; ++i)
    newlst[i].fd = -1;

  /* Restore the message queue at the end */
  newlst[newsize - 1] = event;

  lst->items = newlst;
  lst->size = newsize;
}


/* Create the private System V message queue that the z/OS poll() call in
 * epoll_wait() watches alongside file descriptors, and affiliate it with
 * the current process.  Aborts on any failure, since the epoll emulation
 * cannot operate without it. */
static void init_message_queue(uv__os390_epoll* lst) {
  struct {
    long int header;
    char body;
  } msg;

  /* initialize message queue */
  lst->msg_queue = msgget(IPC_PRIVATE, 0600 | IPC_CREAT);
  if (lst->msg_queue == -1)
    abort();

  /*
     On z/OS, the message queue will be affiliated with the process only
     when a send is performed on it. Once this is done, the system
     can be queried for all message queues belonging to our process id.
  */
  msg.header = 1;
  if (msgsnd(lst->msg_queue, &msg, sizeof(msg.body), 0) != 0)
    abort();

  /* Clean up the dummy message sent above */
  if (msgrcv(lst->msg_queue, &msg, sizeof(msg.body), 0, 0) != sizeof(msg.body))
    abort();
}


/* pthread_atfork() prepare handler: hold the global epoll lock across
 * fork() so the child never inherits it mid-update. */
static void before_fork(void) {
  uv_mutex_lock(&global_epoll_lock);
}


/* pthread_atfork() parent handler: release the lock taken in before_fork(). */
static void after_fork(void) {
  uv_mutex_unlock(&global_epoll_lock);
}


/* pthread_atfork() child handler: the child starts single-threaded, so
 * reset all global epoll state inherited from the parent. */
static void child_fork(void) {
  QUEUE* q;
  uv_once_t child_once = UV_ONCE_INIT;

  /* reset once */
  memcpy(&once, &child_once, sizeof(child_once));

  /* reset epoll list */
  while (!QUEUE_EMPTY(&global_epoll_queue)) {
    uv__os390_epoll* lst;
    q = QUEUE_HEAD(&global_epoll_queue);
    QUEUE_REMOVE(q);
    lst = QUEUE_DATA(q, uv__os390_epoll, member);
    uv__free(lst->items);
    lst->items = NULL;
    lst->size = 0;
  }

  /* Drop the lock acquired in before_fork(), then destroy it; epoll_init()
   * will recreate it on next use because 'once' was reset above. */
  uv_mutex_unlock(&global_epoll_lock);
  uv_mutex_destroy(&global_epoll_lock);
}


/* One-time initializer (run via uv_once): set up the global epoll instance
 * queue, its lock, and the fork handlers that keep both consistent. */
static void epoll_init(void) {
  QUEUE_INIT(&global_epoll_queue);
  if (uv_mutex_init(&global_epoll_lock))
    abort();

  if (pthread_atfork(&before_fork, &after_fork, &child_fork))
    abort();
}


epoll_create1(int flags)215 uv__os390_epoll* epoll_create1(int flags) {
216 uv__os390_epoll* lst;
217
218 lst = uv__malloc(sizeof(*lst));
219 if (lst != NULL) {
220 /* initialize list */
221 lst->size = 0;
222 lst->items = NULL;
223 init_message_queue(lst);
224 maybe_resize(lst, 1);
225 lst->items[lst->size - 1].fd = lst->msg_queue;
226 lst->items[lst->size - 1].events = POLLIN;
227 lst->items[lst->size - 1].revents = 0;
228 uv_once(&once, epoll_init);
229 uv_mutex_lock(&global_epoll_lock);
230 QUEUE_INSERT_TAIL(&global_epoll_queue, &lst->member);
231 uv_mutex_unlock(&global_epoll_lock);
232 }
233
234 return lst;
235 }
236
237
/* Add, modify or delete the registration of |fd| in epoll instance |lst|.
 * The fd doubles as the index into the pollfd array.  Returns 0 on
 * success, -1 with errno set (ENOENT/EEXIST) on failure; aborts on an
 * unknown |op|. */
int epoll_ctl(uv__os390_epoll* lst,
              int op,
              int fd,
              struct epoll_event *event) {
  uv_mutex_lock(&global_epoll_lock);

  switch (op) {
    case EPOLL_CTL_DEL:
      if (fd >= lst->size || lst->items[fd].fd == -1) {
        uv_mutex_unlock(&global_epoll_lock);
        errno = ENOENT;
        return -1;
      }
      lst->items[fd].fd = -1;
      break;

    case EPOLL_CTL_ADD:
      /* Resizing to 'fd + 1' would expand the list to contain at least
       * 'fd'. But we need to guarantee that the last index on the list
       * is reserved for the message queue. So specify 'fd + 2' instead.
       */
      maybe_resize(lst, fd + 2);
      if (lst->items[fd].fd != -1) {
        uv_mutex_unlock(&global_epoll_lock);
        errno = EEXIST;
        return -1;
      }
      lst->items[fd].fd = fd;
      lst->items[fd].events = event->events;
      lst->items[fd].revents = 0;
      break;

    case EPOLL_CTL_MOD:
      if (fd >= lst->size - 1 || lst->items[fd].fd == -1) {
        uv_mutex_unlock(&global_epoll_lock);
        errno = ENOENT;
        return -1;
      }
      lst->items[fd].events = event->events;
      lst->items[fd].revents = 0;
      break;

    default:
      abort();
  }

  uv_mutex_unlock(&global_epoll_lock);
  return 0;
}


/* Wait for events on the descriptors registered with |lst|.
 *
 * Issues one z/OS poll() over the registered pollfd list (whose last
 * entry is the message queue slot), then converts ready pollfds into
 * epoll_events stored in |events|.  Returns the number of events stored,
 * 0 on timeout, or poll()'s negative return with errno set on error.
 */
int epoll_wait(uv__os390_epoll* lst, struct epoll_event* events,
               int maxevents, int timeout) {
  nmsgsfds_t size;
  struct pollfd* pfds;
  int pollret;
  int reventcount;
  int nevents;

  /* z/OS poll() takes a combined count: here 1 message queue plus
   * (size - 1) file descriptor slots. */
  _SET_FDS_MSGS(size, 1, lst->size - 1);
  pfds = lst->items;
  pollret = poll(pfds, size, timeout);
  if (pollret <= 0)
    return pollret;

  /* Total readiness count: ready fds plus ready message queues. */
  pollret = _NFDS(pollret) + _NMSGS(pollret);

  reventcount = 0;
  nevents = 0;
  for (int i = 0;
       i < lst->size && i < maxevents && reventcount < pollret; ++i) {
    struct epoll_event ev;
    struct pollfd* pfd;

    pfd = &pfds[i];
    /* Skip empty slots (fd == -1) and quiet descriptors. */
    if (pfd->fd == -1 || pfd->revents == 0)
      continue;

    ev.fd = pfd->fd;
    ev.events = pfd->revents;
    /* Tally toward pollret so the scan can stop once every reported
     * event has been consumed; an fd that is both readable and writable
     * is counted twice here — presumably mirroring how z/OS poll()
     * tallies its return value (NOTE(review): confirm against BPX1POL
     * documentation). */
    if (pfd->revents & POLLIN && pfd->revents & POLLOUT)
      reventcount += 2;
    else if (pfd->revents & (POLLIN | POLLOUT))
      ++reventcount;

    pfd->revents = 0;
    events[nevents++] = ev;
  }

  /* NOTE(review): the loop bound uses 'i < maxevents' rather than
   * 'nevents < maxevents', so skipped slots also consume the caller's
   * event budget — confirm whether that is intentional. */
  return nevents;
}


epoll_file_close(int fd)324 int epoll_file_close(int fd) {
325 QUEUE* q;
326
327 uv_once(&once, epoll_init);
328 uv_mutex_lock(&global_epoll_lock);
329 QUEUE_FOREACH(q, &global_epoll_queue) {
330 uv__os390_epoll* lst;
331
332 lst = QUEUE_DATA(q, uv__os390_epoll, member);
333 if (fd < lst->size && lst->items != NULL && lst->items[fd].fd != -1)
334 lst->items[fd].fd = -1;
335 }
336
337 uv_mutex_unlock(&global_epoll_lock);
338 return 0;
339 }
340
/* Tear down one epoll instance: unlink it from the global queue and
 * release its message queue and pollfd array.  The uv__os390_epoll
 * struct itself remains owned by the caller. */
void epoll_queue_close(uv__os390_epoll* lst) {
  /* Remove epoll instance from global queue */
  uv_mutex_lock(&global_epoll_lock);
  QUEUE_REMOVE(&lst->member);
  uv_mutex_unlock(&global_epoll_lock);

  /* Free resources */
  msgctl(lst->msg_queue, IPC_RMID, NULL);
  lst->msg_queue = -1;
  uv__free(lst->items);
  lst->items = NULL;
}


/* nanosleep() replacement built on the BPX1CTW/BPX4CTW (cond_timed_wait)
 * z/OS callable services.
 *
 * Sleeps for |req| seconds/nanoseconds; on early wakeup the remaining
 * time is stored in |rem| when non-NULL.  Returns the service's rv value
 * (0 on success, -1 on failure with errno set from the service's err,
 * except that EAGAIN — plain timeout expiry — never reaches errno).
 * NOTE(review): a plain timeout still surfaces as rv == -1; confirm that
 * callers treat that as success. */
int nanosleep(const struct timespec* req, struct timespec* rem) {
  unsigned nano;
  unsigned seconds;
  unsigned events;
  unsigned secrem;
  unsigned nanorem;
  int rv;
  int err;
  int rsn;

  nano = (int)req->tv_nsec;
  seconds = req->tv_sec;
  /* Wake on condition-variable post or signal interruption. */
  events = CW_CONDVAR | CW_INTRPT;
  secrem = 0;
  nanorem = 0;

#if defined(_LP64)
  BPX4CTW(&seconds, &nano, &events, &secrem, &nanorem, &rv, &err, &rsn);
#else
  BPX1CTW(&seconds, &nano, &events, &secrem, &nanorem, &rv, &err, &rsn);
#endif

  /* Don't clobber errno unless BPX1CTW/BPX4CTW errored.
   * Don't leak EAGAIN, that just means the timeout expired.
   */
  if (rv == -1)
    if (err != EAGAIN)
      errno = err;

  /* Report the unslept remainder for success, interruption or timeout. */
  if (rem != NULL && (rv == 0 || err == EINTR || err == EAGAIN)) {
    rem->tv_nsec = nanorem;
    rem->tv_sec = secrem;
  }

  return rv;
}


/* mkdtemp() replacement: create a uniquely named directory from the
 * template |path|, whose last six characters must be "XXXXXX".  The
 * random suffix is drawn from /dev/urandom.  Returns |path| with the
 * X's replaced on success, or NULL with errno set on failure. */
char* mkdtemp(char* path) {
  static const char* tempchars =
      "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
  static const size_t num_chars = 62;
  static const size_t num_x = 6;
  char* suffix;
  char* end;
  unsigned int attempts;
  unsigned int i;
  size_t len;
  uint64_t rnd;
  int fd;
  int made;
  int saved_errno;

  /* Validate that the template ends in exactly "XXXXXX". */
  len = strlen(path);
  end = path + len;
  if (len < num_x || strncmp(end - num_x, "XXXXXX", num_x)) {
    errno = EINVAL;
    return NULL;
  }

  fd = open("/dev/urandom", O_RDONLY);
  if (fd == -1)
    return NULL;

  made = -1;
  for (attempts = TMP_MAX; attempts > 0; attempts--) {
    /* A short read leaves made == -1 and falls through to the error
     * return below with the read's errno. */
    if (read(fd, &rnd, sizeof(rnd)) != sizeof(rnd))
      break;

    /* Overwrite the trailing X's with random name characters. */
    suffix = end - num_x;
    for (i = 0; i < num_x; i++) {
      suffix[i] = tempchars[rnd % num_chars];
      rnd /= num_chars;
    }

    if (mkdir(path, S_IRWXU) == 0) {
      made = 0;
      break;
    }
    /* Only a name collision is worth another attempt. */
    if (errno != EEXIST)
      break;
  }

  /* uv__close() may touch errno; preserve the failure cause. */
  saved_errno = errno;
  uv__close(fd);
  if (attempts == 0) {
    errno = EEXIST;
    return NULL;
  }

  if (made == -1) {
    errno = saved_errno;
    return NULL;
  }

  return path;
}


/* readlink() replacement that resolves z/OS "/$NAME/..." style targets.
 *
 * Reads the target of |path| into |buf| (at most |len| bytes, not
 * NUL-terminated, matching readlink()).  When the target starts with
 * "/$", the leading "/$NAME" component — a parmlib variable — is
 * expanded via realpath() and the expansion is spliced in front of the
 * remainder of the target.  Returns the number of bytes stored in |buf|,
 * or -1 with errno set (ENOMEM, ENAMETOOLONG, or errors from
 * readlink()/realpath()).
 */
ssize_t os390_readlink(const char* path, char* buf, size_t len) {
  ssize_t rlen;
  ssize_t vlen;
  ssize_t plen;
  char* delimiter;
  char old_delim;
  char* tmpbuf;
  char realpathstr[PATH_MAX + 1];

  /* One extra byte so the raw target can be NUL-terminated below. */
  tmpbuf = uv__malloc(len + 1);
  if (tmpbuf == NULL) {
    errno = ENOMEM;
    return -1;
  }

  rlen = readlink(path, tmpbuf, len);
  if (rlen < 0) {
    uv__free(tmpbuf);
    return rlen;
  }

  if (rlen < 3 || strncmp("/$", tmpbuf, 2) != 0) {
    /* Straightforward readlink. */
    memcpy(buf, tmpbuf, rlen);
    uv__free(tmpbuf);
    return rlen;
  }

  /*
   * There is a parmlib variable at the beginning
   * which needs interpretation.
   */
  tmpbuf[rlen] = '\0';
  delimiter = strchr(tmpbuf + 2, '/');
  if (delimiter == NULL)
    /* No slash at the end */
    delimiter = strchr(tmpbuf + 2, '\0');

  /* Read real path of the variable. */
  old_delim = *delimiter;
  *delimiter = '\0';
  if (realpath(tmpbuf, realpathstr) == NULL) {
    uv__free(tmpbuf);
    return -1;
  }

  /* realpathstr is not guaranteed to end with null byte.*/
  realpathstr[PATH_MAX] = '\0';

  /* Reset the delimiter and fill up the buffer. */
  *delimiter = old_delim;
  plen = strlen(delimiter);
  vlen = strlen(realpathstr);
  rlen = plen + vlen;
  if (rlen > len) {
    /* Expanded path does not fit in the caller's buffer. */
    uv__free(tmpbuf);
    errno = ENAMETOOLONG;
    return -1;
  }
  /* Expansion first, then the unexpanded remainder of the target. */
  memcpy(buf, realpathstr, vlen);
  memcpy(buf + vlen, delimiter, plen);

  /* Done using temporary buffer. */
  uv__free(tmpbuf);

  return rlen;
}


/* Bounded strlen(): the length of |str|, or |maxlen| when no terminator
 * occurs within the first |maxlen| bytes. */
size_t strnlen(const char* str, size_t maxlen) {
  const char* terminator;

  terminator = memchr(str, '\0', maxlen);
  if (terminator != NULL)
    return (size_t)(terminator - str);
  return maxlen;
}