/* SPDX-License-Identifier: LGPL-2.1-only */
/*
 * Copyright (c) 2003-2012 Thomas Graf <tgraf@suug.ch>
 */

/**
 * @ingroup cache_mngt
 * @defgroup cache_mngr Manager
 * @brief Manager keeping caches up to date automatically.
 *
 * The cache manager keeps caches up to date automatically by listening to
 * netlink notifications and integrating the received information into the
 * existing cache.
 *
 * @note This functionality is still considered experimental.
 *
 * Related sections in the development guide:
 * - @core_doc{_cache_manager,Cache Manager}
 *
 * @{
 *
 * Header
 * ------
 * ~~~~{.c}
 * #include <netlink/cache.h>
 * ~~~~
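 *
 * Example
 * -------
 * A minimal sketch of driving a cache manager (error handling trimmed; the
 * "route/link" cache type is used purely for illustration, any cache type
 * supporting updates works the same way):
 * ~~~~{.c}
 * struct nl_cache_mngr *mngr = NULL;
 * struct nl_cache *cache;
 * int err;
 *
 * // Allocate a manager with an automatically allocated socket
 * err = nl_cache_mngr_alloc(NULL, NETLINK_ROUTE, NL_AUTO_PROVIDE, &mngr);
 *
 * // Fill the cache with an initial dump and subscribe to its
 * // notification groups
 * if (err >= 0)
 *         err = nl_cache_mngr_add(mngr, "route/link", NULL, NULL, &cache);
 *
 * // Let the manager process notifications as they arrive
 * while (err >= 0)
 *         err = nl_cache_mngr_poll(mngr, 1000);
 *
 * nl_cache_mngr_free(mngr);
 * ~~~~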
 */

#include <netlink-private/netlink.h>
#include <netlink-private/utils.h>
#include <netlink/netlink.h>
#include <netlink/cache.h>
#include <netlink/utils.h>

/** @cond SKIP */
#define NASSOC_INIT		16
#define NASSOC_EXPAND		8
/** @endcond */

/*
 * Parser callback: integrate a parsed object into the cache it belongs to,
 * honoring the cache's event filter and custom include handler if present.
 */
static int include_cb(struct nl_object *obj, struct nl_parser_param *p)
{
        struct nl_cache_assoc *ca = p->pp_arg;
        struct nl_cache_ops *ops = ca->ca_cache->c_ops;

        NL_DBG(2, "Including object %p into cache %p\n", obj, ca->ca_cache);
#ifdef NL_DEBUG
        if (nl_debug >= 4)
                nl_object_dump(obj, &nl_debug_dp);
#endif

        if (ops->co_event_filter)
                if (ops->co_event_filter(ca->ca_cache, obj) != NL_OK)
                        return 0;

        if (ops->co_include_event)
                return ops->co_include_event(ca->ca_cache, obj, ca->ca_change,
                                             ca->ca_change_v2,
                                             ca->ca_change_data);
        else {
                if (ca->ca_change_v2)
                        return nl_cache_include_v2(ca->ca_cache, obj, ca->ca_change_v2, ca->ca_change_data);
                else
                        return nl_cache_include(ca->ca_cache, obj, ca->ca_change, ca->ca_change_data);
        }
}

/*
 * NL_CB_VALID callback: look up which managed cache the notification's
 * message type belongs to and feed the message to the cache parser.
 */
static int event_input(struct nl_msg *msg, void *arg)
{
        struct nl_cache_mngr *mngr = arg;
        int protocol = nlmsg_get_proto(msg);
        int type = nlmsg_hdr(msg)->nlmsg_type;
        struct nl_cache_ops *ops;
        int i, n;
        struct nl_parser_param p = {
                .pp_cb = include_cb,
        };

        NL_DBG(2, "Cache manager %p, handling new message %p as event\n",
               mngr, msg);
#ifdef NL_DEBUG
        if (nl_debug >= 4)
                nl_msg_dump(msg, stderr);
#endif

        if (mngr->cm_protocol != protocol)
                BUG();

        for (i = 0; i < mngr->cm_nassocs; i++) {
                if (mngr->cm_assocs[i].ca_cache) {
                        ops = mngr->cm_assocs[i].ca_cache->c_ops;
                        for (n = 0; ops->co_msgtypes[n].mt_id >= 0; n++)
                                if (ops->co_msgtypes[n].mt_id == type)
                                        goto found;
                }
        }

        return NL_SKIP;

found:
        NL_DBG(2, "Associated message %p to cache %p\n",
               msg, mngr->cm_assocs[i].ca_cache);
        p.pp_arg = &mngr->cm_assocs[i];

        return nl_cache_parse(ops, NULL, nlmsg_hdr(msg), &p);
}

/**
 * Allocate new cache manager
 * @arg sk		Netlink socket or NULL to auto allocate
 * @arg protocol	Netlink protocol this manager is used for
 * @arg flags		Flags (\c NL_AUTO_PROVIDE)
 * @arg result		Result pointer
 *
 * Allocates a new cache manager for the specified netlink protocol.
 *
 *  1. If sk is not specified (\c NULL) a netlink socket matching the
 *     specified protocol will be automatically allocated.
 *
 *  2. The socket will be put in non-blocking mode and sequence checking
 *     will be disabled regardless of whether the socket was provided by
 *     the caller or automatically allocated.
 *
 *  3. The socket will be connected.
 *
 * If the flag \c NL_AUTO_PROVIDE is specified, any cache added to the
 * manager will automatically be made available to other users using
 * nl_cache_mngt_provide().
 *
 * @note If the socket is provided by the caller, it is NOT recommended
 *       to use the socket for anything else besides receiving netlink
 *       notifications.
 *
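 * A sketch of allocating a manager on a caller-provided socket (NETLINK_ROUTE
 * is used purely for illustration; on failure the caller remains responsible
 * for freeing its own socket):
 * ~~~~{.c}
 * struct nl_sock *sk = nl_socket_alloc();
 * struct nl_cache_mngr *mngr;
 * int err;
 *
 * if (!sk)
 *         return -NLE_NOMEM;
 *
 * // The manager disables sequence checking, switches the socket to
 * // non-blocking mode and connects it
 * err = nl_cache_mngr_alloc(sk, NETLINK_ROUTE, 0, &mngr);
 * if (err < 0) {
 *         nl_socket_free(sk);
 *         return err;
 * }
 * ~~~~
 *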
 * @return 0 on success or a negative error code.
 */
int nl_cache_mngr_alloc(struct nl_sock *sk, int protocol, int flags,
                        struct nl_cache_mngr **result)
{
        struct nl_cache_mngr *mngr;
        int err = -NLE_NOMEM;

        /* Catch abuse of flags */
        if (flags & NL_ALLOCATED_SOCK)
                BUG();

        mngr = calloc(1, sizeof(*mngr));
        if (!mngr)
                return -NLE_NOMEM;

        if (!sk) {
                if (!(sk = nl_socket_alloc()))
                        goto errout;

                flags |= NL_ALLOCATED_SOCK;
        }

        mngr->cm_sock = sk;
        mngr->cm_nassocs = NASSOC_INIT;
        mngr->cm_protocol = protocol;
        mngr->cm_flags = flags;
        mngr->cm_assocs = calloc(mngr->cm_nassocs,
                                 sizeof(struct nl_cache_assoc));
        if (!mngr->cm_assocs)
                goto errout;

        /* Required to receive async event notifications */
        nl_socket_disable_seq_check(mngr->cm_sock);

        if ((err = nl_connect(mngr->cm_sock, protocol)) < 0)
                goto errout;

        if ((err = nl_socket_set_nonblocking(mngr->cm_sock)) < 0)
                goto errout;

        /* Allocate and connect a separate socket for synchronous cache fills */
        mngr->cm_sync_sock = nl_socket_alloc();
        if (!mngr->cm_sync_sock) {
                err = -NLE_NOMEM;
                goto errout;
        }
        if ((err = nl_connect(mngr->cm_sync_sock, protocol)) < 0)
                goto errout_free_sync_sock;

        NL_DBG(1, "Allocated cache manager %p, protocol %d, %d caches\n",
               mngr, protocol, mngr->cm_nassocs);

        *result = mngr;
        return 0;

errout_free_sync_sock:
        nl_socket_free(mngr->cm_sync_sock);
        mngr->cm_sync_sock = NULL; /* avoid a second free in nl_cache_mngr_free() */
errout:
        nl_cache_mngr_free(mngr);
        return err;
}

/**
 * Set change_func_v2 for cache manager
 * @arg mngr		Cache manager.
 * @arg cache		Cache associated with the callback
 * @arg cb		Function to be called upon changes.
 * @arg data		Argument passed on to change callback
 *
 * Adds the callback change_func_v2 to a registered cache. Like the standard
 * change_func, this callback is passed the added or removed netlink object.
 * In case of a change, both the old and the new object are provided, along
 * with the corresponding diff. If this callback is registered, it takes
 * priority over the change_func registered when the cache was added, so
 * only one of the two callbacks is executed.
 *
 * The first netlink object passed to the callback refers to the old object
 * and the second to the new one. On NL_ACT_CHANGE the first is the previous
 * object in the cache and the second is its updated version. On NL_ACT_DEL
 * the first is the deleted object and the second is NULL. On NL_ACT_NEW the
 * first is NULL and the second is the new netlink object.
 *
 * The user is responsible for calling nl_cache_mngr_poll() or monitoring
 * the socket and calling nl_cache_mngr_data_ready() to allow the library
 * to process netlink notification events.
 *
 * @see nl_cache_mngr_poll()
 * @see nl_cache_mngr_data_ready()
 *
 * @return 0 on success or a negative error code.
 * @return -NLE_PROTO_MISMATCH Protocol mismatch between cache manager and
 *                             cache type
 * @return -NLE_OPNOTSUPP Cache type does not support updates
 * @return -NLE_RANGE Cache of this type is not registered
 */
static int nl_cache_mngr_set_change_func_v2(struct nl_cache_mngr *mngr,
                                            struct nl_cache *cache,
                                            change_func_v2_t cb, void *data)
{
        struct nl_cache_ops *ops;
        int i;

        ops = cache->c_ops;
        if (!ops)
                return -NLE_INVAL;

        if (ops->co_protocol != mngr->cm_protocol)
                return -NLE_PROTO_MISMATCH;

        if (ops->co_groups == NULL)
                return -NLE_OPNOTSUPP;

        for (i = 0; i < mngr->cm_nassocs; i++)
                if (mngr->cm_assocs[i].ca_cache == cache)
                        break;

        if (i >= mngr->cm_nassocs)
                return -NLE_RANGE;

        mngr->cm_assocs[i].ca_change_v2 = cb;
        mngr->cm_assocs[i].ca_change_data = data;

        return 0;
}

/**
 * Add cache to cache manager
 * @arg mngr		Cache manager.
 * @arg cache		Cache to be added to cache manager
 * @arg cb		Function to be called upon changes.
 * @arg data		Argument passed on to change callback
 *
 * Adds cache to the manager. The operation will trigger a full
 * dump request from the kernel to initially fill the contents
 * of the cache. The manager will subscribe to the notification group
 * of the cache and keep track of any further changes.
 *
 * The user is responsible for calling nl_cache_mngr_poll() or monitoring
 * the socket and calling nl_cache_mngr_data_ready() to allow the library
 * to process netlink notification events.
 *
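 * A sketch of adding a pre-allocated cache with a change callback. The
 * "route/link" cache type is used only for illustration and the callback
 * assumes the change_func_t signature (cache, object, action, user data):
 * ~~~~{.c}
 * static void link_change(struct nl_cache *cache, struct nl_object *obj,
 *                         int action, void *data)
 * {
 *         // action is NL_ACT_NEW, NL_ACT_DEL or NL_ACT_CHANGE
 * }
 *
 * struct nl_cache *cache;
 *
 * if (nl_cache_alloc_name("route/link", &cache) < 0)
 *         return -1;
 *
 * if (nl_cache_mngr_add_cache(mngr, cache, link_change, NULL) < 0) {
 *         nl_cache_free(cache);
 *         return -1;
 * }
 * ~~~~
 *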
 * @see nl_cache_mngr_poll()
 * @see nl_cache_mngr_data_ready()
 *
 * @return 0 on success or a negative error code.
 * @return -NLE_PROTO_MISMATCH Protocol mismatch between cache manager and
 *                             cache type
 * @return -NLE_OPNOTSUPP Cache type does not support updates
 * @return -NLE_EXIST Cache of this type already being managed
 */
int nl_cache_mngr_add_cache(struct nl_cache_mngr *mngr, struct nl_cache *cache,
                            change_func_t cb, void *data)
{
        struct nl_cache_ops *ops;
        struct nl_af_group *grp;
        int err, i;

        ops = cache->c_ops;
        if (!ops)
                return -NLE_INVAL;

        if (ops->co_protocol != mngr->cm_protocol)
                return -NLE_PROTO_MISMATCH;

        if (ops->co_groups == NULL)
                return -NLE_OPNOTSUPP;

        for (i = 0; i < mngr->cm_nassocs; i++)
                if (mngr->cm_assocs[i].ca_cache &&
                    mngr->cm_assocs[i].ca_cache->c_ops == ops)
                        return -NLE_EXIST;

        for (i = 0; i < mngr->cm_nassocs; i++)
                if (!mngr->cm_assocs[i].ca_cache)
                        break;

        if (i >= mngr->cm_nassocs) {
                struct nl_cache_assoc *cm_assocs;
                int cm_nassocs = mngr->cm_nassocs + NASSOC_EXPAND;

                cm_assocs = realloc(mngr->cm_assocs,
                                    cm_nassocs * sizeof(struct nl_cache_assoc));
                if (cm_assocs == NULL)
                        return -NLE_NOMEM;

                memset(cm_assocs + mngr->cm_nassocs, 0,
                       NASSOC_EXPAND * sizeof(struct nl_cache_assoc));
                mngr->cm_assocs = cm_assocs;
                mngr->cm_nassocs = cm_nassocs;

                NL_DBG(1, "Increased capacity of cache manager %p "
                          "to %d\n", mngr, mngr->cm_nassocs);
        }

        for (grp = ops->co_groups; grp->ag_group; grp++) {
                err = nl_socket_add_membership(mngr->cm_sock, grp->ag_group);
                if (err < 0)
                        return err;
        }

        err = nl_cache_refill(mngr->cm_sync_sock, cache);
        if (err < 0)
                goto errout_drop_membership;

        mngr->cm_assocs[i].ca_cache = cache;
        mngr->cm_assocs[i].ca_change = cb;
        mngr->cm_assocs[i].ca_change_data = data;

        if (mngr->cm_flags & NL_AUTO_PROVIDE)
                nl_cache_mngt_provide(cache);

        NL_DBG(1, "Added cache %p <%s> to cache manager %p\n",
               cache, nl_cache_name(cache), mngr);

        return 0;

errout_drop_membership:
        for (grp = ops->co_groups; grp->ag_group; grp++)
                nl_socket_drop_membership(mngr->cm_sock, grp->ag_group);

        return err;
}

/**
 * Add cache to cache manager using a v2 change callback
 * @arg mngr		Cache manager.
 * @arg cache		Cache to be added to cache manager
 * @arg cb		V2 function to be called upon changes.
 * @arg data		Argument passed on to change callback
 *
 * Adds cache to the manager. The operation will trigger a full
 * dump request from the kernel to initially fill the contents
 * of the cache. The manager will subscribe to the notification group
 * of the cache and keep track of any further changes.
 *
 * The user is responsible for calling nl_cache_mngr_poll() or monitoring
 * the socket and calling nl_cache_mngr_data_ready() to allow the library
 * to process netlink notification events.
 *
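 * A sketch of a v2 change callback, assuming the change_func_v2_t signature
 * (cache, old object, new object, attribute diff, action, user data):
 * ~~~~{.c}
 * static void change_v2(struct nl_cache *cache, struct nl_object *old_obj,
 *                       struct nl_object *new_obj, uint64_t diff,
 *                       int action, void *data)
 * {
 *         // NL_ACT_NEW:    old_obj == NULL, new_obj is the added object
 *         // NL_ACT_DEL:    old_obj is the deleted object, new_obj == NULL
 *         // NL_ACT_CHANGE: old_obj is the previous version, new_obj the
 *         //                updated one, diff describes the changed attributes
 * }
 *
 * err = nl_cache_mngr_add_cache_v2(mngr, cache, change_v2, NULL);
 * ~~~~
 *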
 * @see nl_cache_mngr_poll()
 * @see nl_cache_mngr_data_ready()
 *
 * @return 0 on success or a negative error code.
 * @return -NLE_PROTO_MISMATCH Protocol mismatch between cache manager and
 *                             cache type
 * @return -NLE_OPNOTSUPP Cache type does not support updates
 * @return -NLE_EXIST Cache of this type already being managed
 */
int nl_cache_mngr_add_cache_v2(struct nl_cache_mngr *mngr, struct nl_cache *cache,
                               change_func_v2_t cb, void *data)
{
        int err;

        err = nl_cache_mngr_add_cache(mngr, cache, NULL, NULL);
        if (err < 0)
                return err;

        return nl_cache_mngr_set_change_func_v2(mngr, cache, cb, data);
}

/**
 * Add cache to cache manager
 * @arg mngr		Cache manager.
 * @arg name		Name of cache to keep track of
 * @arg cb		Function to be called upon changes.
 * @arg data		Argument passed on to change callback
 * @arg result		Pointer to store added cache (optional)
 *
 * Allocates a new cache of the specified type and adds it to the manager.
 * The operation will trigger a full dump request from the kernel to
 * initially fill the contents of the cache. The manager will subscribe
 * to the notification group of the cache and keep track of any further
 * changes.
 *
 * The user is responsible for calling nl_cache_mngr_poll() or monitoring
 * the socket and calling nl_cache_mngr_data_ready() to allow the library
 * to process netlink notification events.
 *
 * @note Versions up to 3.4.0 required the result argument, preventing NULL
 *       from being passed.
 *
 * @see nl_cache_mngr_poll()
 * @see nl_cache_mngr_data_ready()
 *
 * @return 0 on success or a negative error code.
 * @return -NLE_NOCACHE Unknown cache type
 * @return -NLE_PROTO_MISMATCH Protocol mismatch between cache manager and
 *                             cache type
 * @return -NLE_OPNOTSUPP Cache type does not support updates
 * @return -NLE_EXIST Cache of this type already being managed
 */
int nl_cache_mngr_add(struct nl_cache_mngr *mngr, const char *name,
                      change_func_t cb, void *data, struct nl_cache **result)
{
        struct nl_cache_ops *ops;
        struct nl_cache *cache;
        int err;

        ops = nl_cache_ops_lookup_safe(name);
        if (!ops)
                return -NLE_NOCACHE;

        cache = nl_cache_alloc(ops);
        nl_cache_ops_put(ops);
        if (!cache)
                return -NLE_NOMEM;

        err = nl_cache_mngr_add_cache(mngr, cache, cb, data);
        if (err < 0)
                goto errout_free_cache;

        if (result)
                *result = cache;
        return 0;

errout_free_cache:
        nl_cache_free(cache);

        return err;
}

/**
 * Get socket file descriptor
 * @arg mngr		Cache Manager
 *
 * Get the file descriptor of the socket associated with the manager.
 *
 * @note Do not use the socket for anything besides receiving
 *       notifications.
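 *
 * A sketch of integrating the manager into a caller-driven event loop via
 * poll() instead of nl_cache_mngr_poll() (illustrative only):
 * ~~~~{.c}
 * struct pollfd pfd = {
 *         .fd = nl_cache_mngr_get_fd(mngr),
 *         .events = POLLIN,
 * };
 *
 * if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *         nl_cache_mngr_data_ready(mngr);
 * ~~~~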
 */
int nl_cache_mngr_get_fd(struct nl_cache_mngr *mngr)
{
        return nl_socket_get_fd(mngr->cm_sock);
}

/**
 * Check for event notifications
 * @arg mngr		Cache Manager
 * @arg timeout		Upper limit of time poll() will block, in milliseconds.
 *
 * Causes poll() to be called to check for new event notifications
 * being available. Calls nl_cache_mngr_data_ready() to process
 * available data.
 *
 * This function is ideally called regularly during idle periods.
 *
 * A timeout can be specified in milliseconds to limit the time the
 * function will wait for updates.
 *
 * @see nl_cache_mngr_data_ready()
 *
 * @return The number of messages processed or a negative error code.
 */
int nl_cache_mngr_poll(struct nl_cache_mngr *mngr, int timeout)
{
        int ret;
        struct pollfd fds = {
                .fd = nl_socket_get_fd(mngr->cm_sock),
                .events = POLLIN,
        };

        NL_DBG(3, "Cache manager %p, poll() fd %d\n", mngr, fds.fd);
        ret = poll(&fds, 1, timeout);
        NL_DBG(3, "Cache manager %p, poll() returned %d\n", mngr, ret);
        if (ret < 0) {
                NL_DBG(4, "nl_cache_mngr_poll(%p): poll() failed with %d (%s)\n",
                       mngr, errno, nl_strerror_l(errno));
                return -nl_syserr2nlerr(errno);
        }

        /* No events, return */
        if (ret == 0)
                return 0;

        return nl_cache_mngr_data_ready(mngr);
}

/**
 * Receive available event notifications
 * @arg mngr		Cache manager
 *
 * This function can be called if the socket associated with the manager
 * contains updates to be received. This function should only be used
 * if nl_cache_mngr_poll() is not used.
 *
 * The function will process messages until there is no more data to
 * be read from the socket.
 *
 * @see nl_cache_mngr_poll()
 *
 * @return The number of messages processed or a negative error code.
 */
int nl_cache_mngr_data_ready(struct nl_cache_mngr *mngr)
{
        int err, nread = 0;
        struct nl_cb *cb;

        NL_DBG(2, "Cache manager %p, reading new data from fd %d\n",
               mngr, nl_socket_get_fd(mngr->cm_sock));

        cb = nl_cb_clone(mngr->cm_sock->s_cb);
        if (cb == NULL)
                return -NLE_NOMEM;

        nl_cb_set(cb, NL_CB_VALID, NL_CB_CUSTOM, event_input, mngr);

        while ((err = nl_recvmsgs_report(mngr->cm_sock, cb)) > 0) {
                NL_DBG(2, "Cache manager %p, recvmsgs read %d messages\n",
                       mngr, err);
                nread += err;
        }

        nl_cb_put(cb);
        if (err < 0 && err != -NLE_AGAIN)
                return err;

        return nread;
}

/**
 * Print information about cache manager
 * @arg mngr		Cache manager
 * @arg p		Dumping parameters
 *
 * Prints information about the cache manager including all managed caches.
 *
 * @note This is a debugging function.
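 *
 * A sketch of dumping the manager state to stderr (illustrative only):
 * ~~~~{.c}
 * struct nl_dump_params dp = {
 *         .dp_type = NL_DUMP_LINE,
 *         .dp_fd = stderr,
 * };
 *
 * nl_cache_mngr_info(mngr, &dp);
 * ~~~~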
 */
void nl_cache_mngr_info(struct nl_cache_mngr *mngr, struct nl_dump_params *p)
{
        char buf[128];
        int i;

        nl_dump_line(p, "cache-manager <%p>\n", mngr);
        nl_dump_line(p, "  .protocol = %s\n",
                     nl_nlfamily2str(mngr->cm_protocol, buf, sizeof(buf)));
        nl_dump_line(p, "  .flags = %#x\n", mngr->cm_flags);
        nl_dump_line(p, "  .nassocs = %u\n", mngr->cm_nassocs);
        nl_dump_line(p, "  .sock = <%p>\n", mngr->cm_sock);

        for (i = 0; i < mngr->cm_nassocs; i++) {
                struct nl_cache_assoc *assoc = &mngr->cm_assocs[i];

                if (assoc->ca_cache) {
                        nl_dump_line(p, "    .cache[%d] = <%p> {\n", i, assoc->ca_cache);
                        nl_dump_line(p, "      .name = %s\n", assoc->ca_cache->c_ops->co_name);
                        nl_dump_line(p, "      .change_func = <%p>\n", assoc->ca_change);
                        nl_dump_line(p, "      .change_data = <%p>\n", assoc->ca_change_data);
                        nl_dump_line(p, "      .nitems = %u\n", nl_cache_nitems(assoc->ca_cache));
                        nl_dump_line(p, "      .objects = {\n");

                        p->dp_prefix += 6;
                        nl_cache_dump(assoc->ca_cache, p);
                        p->dp_prefix -= 6;

                        nl_dump_line(p, "      }\n");
                        nl_dump_line(p, "    }\n");
                }
        }
}

/**
 * Free cache manager and all caches.
 * @arg mngr		Cache manager.
 *
 * Release all resources held by a cache manager.
 */
void nl_cache_mngr_free(struct nl_cache_mngr *mngr)
{
        int i;

        if (!mngr)
                return;

        if (mngr->cm_sock)
                nl_close(mngr->cm_sock);

        if (mngr->cm_sync_sock) {
                nl_close(mngr->cm_sync_sock);
                nl_socket_free(mngr->cm_sync_sock);
        }

        if (mngr->cm_flags & NL_ALLOCATED_SOCK)
                nl_socket_free(mngr->cm_sock);

        for (i = 0; i < mngr->cm_nassocs; i++) {
                if (mngr->cm_assocs[i].ca_cache) {
                        nl_cache_mngt_unprovide(mngr->cm_assocs[i].ca_cache);
                        nl_cache_free(mngr->cm_assocs[i].ca_cache);
                }
        }

        free(mngr->cm_assocs);

        NL_DBG(1, "Cache manager %p freed\n", mngr);

        free(mngr);
}

/** @} */