1 /*
2 * Copyright (C) 2008 The Android Open Source Project
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the
13 * distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
18 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
19 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
22 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
25 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
29 #include "resolv_cache.h"
30 #include <resolv.h>
31 #include <stdlib.h>
32 #include <string.h>
33 #include <time.h>
34 #include "pthread.h"
35
36 #include <errno.h>
37 #include "arpa_nameser.h"
38 #include <sys/system_properties.h>
39 #include <net/if.h>
40 #include <netdb.h>
41 #include <linux/if.h>
42
43 #include <arpa/inet.h>
44 #include "resolv_private.h"
45
46 /* This code implements a small and *simple* DNS resolver cache.
47 *
48 * It is only used to cache DNS answers for a time defined by the smallest TTL
49 * among the answer records in order to reduce DNS traffic. It is not supposed
50 * to be a full DNS cache, since we plan to implement that in the future in a
51 * dedicated process running on the system.
52 *
53 * Note that its design is kept simple very intentionally, i.e.:
54 *
55 * - it takes raw DNS query packet data as input, and returns raw DNS
56 * answer packet data as output
57 *
58 * (this means that two similar queries that encode the DNS name
59 * differently will be treated distinctly).
60 *
61 * the smallest TTL value among the answer records are used as the time
62 * to keep an answer in the cache.
63 *
64 * this is bad, but we absolutely want to avoid parsing the answer packets
65 * (and should be solved by the later full DNS cache process).
66 *
67 * - the implementation is just a (query-data) => (answer-data) hash table
68 * with a trivial least-recently-used expiration policy.
69 *
70 * Doing this keeps the code simple and avoids to deal with a lot of things
71 * that a full DNS cache is expected to do.
72 *
73 * The API is also very simple:
74 *
75 * - the client calls _resolv_cache_get() to obtain a handle to the cache.
76 * this will initialize the cache on first usage. the result can be NULL
77 * if the cache is disabled.
78 *
79 * - the client calls _resolv_cache_lookup() before performing a query
80 *
81 * if the function returns RESOLV_CACHE_FOUND, a copy of the answer data
82 * has been copied into the client-provided answer buffer.
83 *
84 * if the function returns RESOLV_CACHE_NOTFOUND, the client should perform
85 * a request normally, *then* call _resolv_cache_add() to add the received
86 * answer to the cache.
87 *
88 * if the function returns RESOLV_CACHE_UNSUPPORTED, the client should
89 * perform a request normally, and *not* call _resolv_cache_add()
90 *
91 * note that RESOLV_CACHE_UNSUPPORTED is also returned if the answer buffer
 *     is too short to accommodate the cached result.
93 *
94 * - when network settings change, the cache must be flushed since the list
95 * of DNS servers probably changed. this is done by calling
96 * _resolv_cache_reset()
97 *
98 * the parameter to this function must be an ever-increasing generation
99 * number corresponding to the current network settings state.
100 *
101 * This is done because several threads could detect the same network
102 * settings change (but at different times) and will all end up calling the
103 * same function. Comparing with the last used generation number ensures
104 * that the cache is only flushed once per network change.
105 */
106
107 /* the name of an environment variable that will be checked the first time
108 * this code is called if its value is "0", then the resolver cache is
109 * disabled.
110 */
111 #define CONFIG_ENV "BIONIC_DNSCACHE"
112
113 /* entries older than CONFIG_SECONDS seconds are always discarded.
114 */
115 #define CONFIG_SECONDS (60*10) /* 10 minutes */
116
117 /* default number of entries kept in the cache. This value has been
118 * determined by browsing through various sites and counting the number
119 * of corresponding requests. Keep in mind that our framework is currently
120 * performing two requests per name lookup (one for IPv4, the other for IPv6)
121 *
122 * www.google.com 4
123 * www.ysearch.com 6
124 * www.amazon.com 8
125 * www.nytimes.com 22
126 * www.espn.com 28
127 * www.msn.com 28
128 * www.lemonde.fr 35
129 *
130 * (determined in 2009-2-17 from Paris, France, results may vary depending
131 * on location)
132 *
133 * most high-level websites use lots of media/ad servers with different names
134 * but these are generally reused when browsing through the site.
135 *
136 * As such, a value of 64 should be relatively comfortable at the moment.
137 *
138 * The system property ro.net.dns_cache_size can be used to override the default
139 * value with a custom value
140 */
141 #define CONFIG_MAX_ENTRIES 64
142
143 /* name of the system property that can be used to set the cache size */
144 #define DNS_CACHE_SIZE_PROP_NAME "ro.net.dns_cache_size"
145
146 /****************************************************************************/
147 /****************************************************************************/
148 /***** *****/
149 /***** *****/
150 /***** *****/
151 /****************************************************************************/
152 /****************************************************************************/
153
154 /* set to 1 to debug cache operations */
155 #define DEBUG 0
156
157 /* set to 1 to debug query data */
158 #define DEBUG_DATA 0
159
160 #undef XLOG
161 #if DEBUG
162 # include <logd.h>
163 # define XLOG(...) \
164 __libc_android_log_print(ANDROID_LOG_DEBUG,"libc",__VA_ARGS__)
165
166 #include <stdio.h>
167 #include <stdarg.h>
168
169 /** BOUNDED BUFFER FORMATTING
170 **/
171
172 /* technical note:
173 *
174 * the following debugging routines are used to append data to a bounded
175 * buffer they take two parameters that are:
176 *
177 * - p : a pointer to the current cursor position in the buffer
178 * this value is initially set to the buffer's address.
179 *
180 * - end : the address of the buffer's limit, i.e. of the first byte
181 * after the buffer. this address should never be touched.
182 *
183 * IMPORTANT: it is assumed that end > buffer_address, i.e.
184 * that the buffer is at least one byte.
185 *
186 * the _bprint_() functions return the new value of 'p' after the data
187 * has been appended, and also ensure the following:
188 *
189 * - the returned value will never be strictly greater than 'end'
190 *
 *  - a return value equal to 'end' means that truncation occurred
192 * (in which case, end[-1] will be set to 0)
193 *
194 * - after returning from a _bprint_() function, the content of the buffer
195 * is always 0-terminated, even in the event of truncation.
196 *
197 * these conventions allow you to call _bprint_ functions multiple times and
198 * only check for truncation at the end of the sequence, as in:
199 *
200 * char buff[1000], *p = buff, *end = p + sizeof(buff);
201 *
202 * p = _bprint_c(p, end, '"');
203 * p = _bprint_s(p, end, my_string);
204 * p = _bprint_c(p, end, '"');
205 *
206 * if (p >= end) {
207 * // buffer was too small
208 * }
209 *
210 * printf( "%s", buff );
211 */
212
213 /* add a char to a bounded buffer */
/* append one character to a bounded buffer, keeping it 0-terminated */
static char*
_bprint_c( char* p, char* end, int c )
{
    if (p >= end)
        return p;

    if (end - p == 1) {
        /* only room for the terminator: record truncation */
        *p++ = 0;
    } else {
        p[0] = (char) c;
        p[1] = 0;
        p   += 1;
    }
    return p;
}
227
228 /* add a sequence of bytes to a bounded buffer */
/* append a sequence of bytes to a bounded buffer, keeping it 0-terminated */
static char*
_bprint_b( char* p, char* end, const char* buf, int len )
{
    int room = (int)(end - p);

    if (room <= 0 || len <= 0)
        return p;

    if (len < room)
        room = len;

    memcpy(p, buf, room);
    p += room;

    /* keep the buffer 0-terminated, clobbering the last byte on truncation */
    if (p == end)
        end[-1] = 0;
    else
        p[0] = 0;

    return p;
}
250
251 /* add a string to a bounded buffer */
/* append a NUL-terminated string to a bounded buffer */
static char*
_bprint_s( char* p, char* end, const char* str )
{
    return _bprint_b( p, end, str, (int) strlen(str) );
}
257
258 /* add a formatted string to a bounded buffer */
/* append a printf-style formatted string to a bounded buffer */
static char*
_bprint( char* p, char* end, const char* format, ... )
{
    va_list args;
    int     room = (int)(end - p);
    int     n;

    if (room <= 0)
        return p;

    va_start(args, format);
    n = vsnprintf(p, room, format, args);
    va_end(args);

    /* some C libraries report truncation with a negative result */
    if (n < 0 || n > room)
        n = room;

    p += n;
    /* some C libraries do not 0-terminate on truncation */
    if (p == end)
        p[-1] = 0;

    return p;
}
285
286 /* add a hex value to a bounded buffer, up to 8 digits */
/* append a hex value to a bounded buffer, up to 8 digits, MSB first */
static char*
_bprint_hex( char* p, char* end, unsigned value, int numDigits )
{
    char digits[sizeof(unsigned)*2];
    int  count = 0;

    while (numDigits-- > 0)
        digits[count++] = "0123456789abcdef"[(value >> (numDigits*4)) & 15];

    return _bprint_b(p, end, digits, count);
}
298
299 /* add the hexadecimal dump of some memory area to a bounded buffer */
300 static char*
_bprint_hexdump(char * p,char * end,const uint8_t * data,int datalen)301 _bprint_hexdump( char* p, char* end, const uint8_t* data, int datalen )
302 {
303 int lineSize = 16;
304
305 while (datalen > 0) {
306 int avail = datalen;
307 int nn;
308
309 if (avail > lineSize)
310 avail = lineSize;
311
312 for (nn = 0; nn < avail; nn++) {
313 if (nn > 0)
314 p = _bprint_c(p, end, ' ');
315 p = _bprint_hex(p, end, data[nn], 2);
316 }
317 for ( ; nn < lineSize; nn++ ) {
318 p = _bprint_s(p, end, " ");
319 }
320 p = _bprint_s(p, end, " ");
321
322 for (nn = 0; nn < avail; nn++) {
323 int c = data[nn];
324
325 if (c < 32 || c > 127)
326 c = '.';
327
328 p = _bprint_c(p, end, c);
329 }
330 p = _bprint_c(p, end, '\n');
331
332 data += avail;
333 datalen -= avail;
334 }
335 return p;
336 }
337
338 /* dump the content of a query of packet to the log */
/* dump the content of a query or answer packet to the log */
static void
XLOG_BYTES( const void* base, int len )
{
    char  buff[1024];
    char* end = buff + sizeof(buff);

    _bprint_hexdump(buff, end, base, len);
    XLOG("%s", buff);
}
348
349 #else /* !DEBUG */
350 # define XLOG(...) ((void)0)
351 # define XLOG_BYTES(a,b) ((void)0)
352 #endif
353
/* current wall-clock time, in whole seconds */
static time_t
_time_now( void )
{
    struct timeval now;

    gettimeofday(&now, NULL);
    return now.tv_sec;
}
362
363 /* reminder: the general format of a DNS packet is the following:
364 *
365 * HEADER (12 bytes)
366 * QUESTION (variable)
367 * ANSWER (variable)
368 * AUTHORITY (variable)
369 * ADDITIONNAL (variable)
370 *
371 * the HEADER is made of:
372 *
373 * ID : 16 : 16-bit unique query identification field
374 *
375 * QR : 1 : set to 0 for queries, and 1 for responses
376 * Opcode : 4 : set to 0 for queries
377 * AA : 1 : set to 0 for queries
378 * TC : 1 : truncation flag, will be set to 0 in queries
379 * RD : 1 : recursion desired
380 *
381 * RA : 1 : recursion available (0 in queries)
382 * Z : 3 : three reserved zero bits
383 * RCODE : 4 : response code (always 0=NOERROR in queries)
384 *
385 * QDCount: 16 : question count
386 * ANCount: 16 : Answer count (0 in queries)
387 * NSCount: 16: Authority Record count (0 in queries)
388 * ARCount: 16: Additionnal Record count (0 in queries)
389 *
390 * the QUESTION is made of QDCount Question Record (QRs)
391 * the ANSWER is made of ANCount RRs
392 * the AUTHORITY is made of NSCount RRs
393 * the ADDITIONNAL is made of ARCount RRs
394 *
395 * Each Question Record (QR) is made of:
396 *
397 * QNAME : variable : Query DNS NAME
398 * TYPE : 16 : type of query (A=1, PTR=12, MX=15, AAAA=28, ALL=255)
399 * CLASS : 16 : class of query (IN=1)
400 *
401 * Each Resource Record (RR) is made of:
402 *
403 * NAME : variable : DNS NAME
404 * TYPE : 16 : type of query (A=1, PTR=12, MX=15, AAAA=28, ALL=255)
405 * CLASS : 16 : class of query (IN=1)
406 * TTL : 32 : seconds to cache this RR (0=none)
407 * RDLENGTH: 16 : size of RDDATA in bytes
408 * RDDATA : variable : RR data (depends on TYPE)
409 *
410 * Each QNAME contains a domain name encoded as a sequence of 'labels'
411 * terminated by a zero. Each label has the following format:
412 *
 *   LEN  : 8     : length of label (MUST be < 64)
 *   NAME : 8*LEN : the label's text (must exclude dots)
415 *
416 * A value of 0 in the encoding is interpreted as the 'root' domain and
417 * terminates the encoding. So 'www.android.com' will be encoded as:
418 *
419 * <3>www<7>android<3>com<0>
420 *
421 * Where <n> represents the byte with value 'n'
422 *
423 * Each NAME reflects the QNAME of the question, but has a slightly more
424 * complex encoding in order to provide message compression. This is achieved
425 * by using a 2-byte pointer, with format:
426 *
427 * TYPE : 2 : 0b11 to indicate a pointer, 0b01 and 0b10 are reserved
428 * OFFSET : 14 : offset to another part of the DNS packet
429 *
 * The offset is relative to the start of the DNS packet and must point
 * to an earlier part of the packet. A pointer terminates the encoding.
432 *
433 * The NAME can be encoded in one of the following formats:
434 *
435 * - a sequence of simple labels terminated by 0 (like QNAMEs)
436 * - a single pointer
437 * - a sequence of simple labels terminated by a pointer
438 *
439 * A pointer shall always point to either a pointer of a sequence of
440 * labels (which can themselves be terminated by either a 0 or a pointer)
441 *
442 * The expanded length of a given domain name should not exceed 255 bytes.
443 *
444 * NOTE: we don't parse the answer packets, so don't need to deal with NAME
445 * records, only QNAMEs.
446 */
447
448 #define DNS_HEADER_SIZE 12
449
450 #define DNS_TYPE_A "\00\01" /* big-endian decimal 1 */
451 #define DNS_TYPE_PTR "\00\014" /* big-endian decimal 12 */
452 #define DNS_TYPE_MX "\00\017" /* big-endian decimal 15 */
453 #define DNS_TYPE_AAAA "\00\034" /* big-endian decimal 28 */
454 #define DNS_TYPE_ALL "\00\0377" /* big-endian decimal 255 */
455
456 #define DNS_CLASS_IN "\00\01" /* big-endian decimal 1 */
457
/* lightweight read cursor over a raw DNS packet held in memory;
 * [base, end) is the packet's byte range and 'cursor' is the
 * current parse position */
typedef struct {
    const uint8_t*  base;    /* first byte of the packet */
    const uint8_t*  end;     /* one past the last byte */
    const uint8_t*  cursor;  /* parse position, base <= cursor <= end */
} DnsPacket;
463
464 static void
_dnsPacket_init(DnsPacket * packet,const uint8_t * buff,int bufflen)465 _dnsPacket_init( DnsPacket* packet, const uint8_t* buff, int bufflen )
466 {
467 packet->base = buff;
468 packet->end = buff + bufflen;
469 packet->cursor = buff;
470 }
471
472 static void
_dnsPacket_rewind(DnsPacket * packet)473 _dnsPacket_rewind( DnsPacket* packet )
474 {
475 packet->cursor = packet->base;
476 }
477
478 static void
_dnsPacket_skip(DnsPacket * packet,int count)479 _dnsPacket_skip( DnsPacket* packet, int count )
480 {
481 const uint8_t* p = packet->cursor + count;
482
483 if (p > packet->end)
484 p = packet->end;
485
486 packet->cursor = p;
487 }
488
489 static int
_dnsPacket_readInt16(DnsPacket * packet)490 _dnsPacket_readInt16( DnsPacket* packet )
491 {
492 const uint8_t* p = packet->cursor;
493
494 if (p+2 > packet->end)
495 return -1;
496
497 packet->cursor = p+2;
498 return (p[0]<< 8) | p[1];
499 }
500
501 /** QUERY CHECKING
502 **/
503
504 /* check bytes in a dns packet. returns 1 on success, 0 on failure.
505 * the cursor is only advanced in the case of success
506 */
507 static int
_dnsPacket_checkBytes(DnsPacket * packet,int numBytes,const void * bytes)508 _dnsPacket_checkBytes( DnsPacket* packet, int numBytes, const void* bytes )
509 {
510 const uint8_t* p = packet->cursor;
511
512 if (p + numBytes > packet->end)
513 return 0;
514
515 if (memcmp(p, bytes, numBytes) != 0)
516 return 0;
517
518 packet->cursor = p + numBytes;
519 return 1;
520 }
521
522 /* parse and skip a given QNAME stored in a query packet,
523 * from the current cursor position. returns 1 on success,
524 * or 0 for malformed data.
525 */
526 static int
_dnsPacket_checkQName(DnsPacket * packet)527 _dnsPacket_checkQName( DnsPacket* packet )
528 {
529 const uint8_t* p = packet->cursor;
530 const uint8_t* end = packet->end;
531
532 for (;;) {
533 int c;
534
535 if (p >= end)
536 break;
537
538 c = *p++;
539
540 if (c == 0) {
541 packet->cursor = p;
542 return 1;
543 }
544
545 /* we don't expect label compression in QNAMEs */
546 if (c >= 64)
547 break;
548
549 p += c;
550 /* we rely on the bound check at the start
551 * of the loop here */
552 }
553 /* malformed data */
554 XLOG("malformed QNAME");
555 return 0;
556 }
557
558 /* parse and skip a given QR stored in a packet.
559 * returns 1 on success, and 0 on failure
560 */
561 static int
_dnsPacket_checkQR(DnsPacket * packet)562 _dnsPacket_checkQR( DnsPacket* packet )
563 {
564 int len;
565
566 if (!_dnsPacket_checkQName(packet))
567 return 0;
568
569 /* TYPE must be one of the things we support */
570 if (!_dnsPacket_checkBytes(packet, 2, DNS_TYPE_A) &&
571 !_dnsPacket_checkBytes(packet, 2, DNS_TYPE_PTR) &&
572 !_dnsPacket_checkBytes(packet, 2, DNS_TYPE_MX) &&
573 !_dnsPacket_checkBytes(packet, 2, DNS_TYPE_AAAA) &&
574 !_dnsPacket_checkBytes(packet, 2, DNS_TYPE_ALL))
575 {
576 XLOG("unsupported TYPE");
577 return 0;
578 }
579 /* CLASS must be IN */
580 if (!_dnsPacket_checkBytes(packet, 2, DNS_CLASS_IN)) {
581 XLOG("unsupported CLASS");
582 return 0;
583 }
584
585 return 1;
586 }
587
588 /* check the header of a DNS Query packet, return 1 if it is one
589 * type of query we can cache, or 0 otherwise
590 */
591 static int
_dnsPacket_checkQuery(DnsPacket * packet)592 _dnsPacket_checkQuery( DnsPacket* packet )
593 {
594 const uint8_t* p = packet->base;
595 int qdCount, anCount, dnCount, arCount;
596
597 if (p + DNS_HEADER_SIZE > packet->end) {
598 XLOG("query packet too small");
599 return 0;
600 }
601
602 /* QR must be set to 0, opcode must be 0 and AA must be 0 */
603 /* RA, Z, and RCODE must be 0 */
604 if ((p[2] & 0xFC) != 0 || p[3] != 0) {
605 XLOG("query packet flags unsupported");
606 return 0;
607 }
608
609 /* Note that we ignore the TC and RD bits here for the
610 * following reasons:
611 *
612 * - there is no point for a query packet sent to a server
613 * to have the TC bit set, but the implementation might
614 * set the bit in the query buffer for its own needs
615 * between a _resolv_cache_lookup and a
616 * _resolv_cache_add. We should not freak out if this
617 * is the case.
618 *
619 * - we consider that the result from a RD=0 or a RD=1
620 * query might be different, hence that the RD bit
621 * should be used to differentiate cached result.
622 *
623 * this implies that RD is checked when hashing or
624 * comparing query packets, but not TC
625 */
626
627 /* ANCOUNT, DNCOUNT and ARCOUNT must be 0 */
628 qdCount = (p[4] << 8) | p[5];
629 anCount = (p[6] << 8) | p[7];
630 dnCount = (p[8] << 8) | p[9];
631 arCount = (p[10]<< 8) | p[11];
632
633 if (anCount != 0 || dnCount != 0 || arCount != 0) {
634 XLOG("query packet contains non-query records");
635 return 0;
636 }
637
638 if (qdCount == 0) {
639 XLOG("query packet doesn't contain query record");
640 return 0;
641 }
642
643 /* Check QDCOUNT QRs */
644 packet->cursor = p + DNS_HEADER_SIZE;
645
646 for (;qdCount > 0; qdCount--)
647 if (!_dnsPacket_checkQR(packet))
648 return 0;
649
650 return 1;
651 }
652
653 /** QUERY DEBUGGING
654 **/
655 #if DEBUG
656 static char*
_dnsPacket_bprintQName(DnsPacket * packet,char * bp,char * bend)657 _dnsPacket_bprintQName(DnsPacket* packet, char* bp, char* bend)
658 {
659 const uint8_t* p = packet->cursor;
660 const uint8_t* end = packet->end;
661 int first = 1;
662
663 for (;;) {
664 int c;
665
666 if (p >= end)
667 break;
668
669 c = *p++;
670
671 if (c == 0) {
672 packet->cursor = p;
673 return bp;
674 }
675
676 /* we don't expect label compression in QNAMEs */
677 if (c >= 64)
678 break;
679
680 if (first)
681 first = 0;
682 else
683 bp = _bprint_c(bp, bend, '.');
684
685 bp = _bprint_b(bp, bend, (const char*)p, c);
686
687 p += c;
688 /* we rely on the bound check at the start
689 * of the loop here */
690 }
691 /* malformed data */
692 bp = _bprint_s(bp, bend, "<MALFORMED>");
693 return bp;
694 }
695
/* print one Question Record (QNAME followed by its TYPE, CLASS skipped)
 * into a bounded buffer; assumes the QR was already validated */
static char*
_dnsPacket_bprintQR(DnsPacket* packet, char* p, char* end)
{
#define QQ(x) { DNS_TYPE_##x, #x }
    /* table mapping each supported 2-byte TYPE encoding to its name */
    static const struct {
        const char* typeBytes;
        const char* typeString;
    } qTypes[] =
    {
        QQ(A), QQ(PTR), QQ(MX), QQ(AAAA), QQ(ALL),
        { NULL, NULL }
    };
    int nn;
    const char* typeString = NULL;

    /* dump QNAME */
    p = _dnsPacket_bprintQName(packet, p, end);

    /* dump TYPE */
    p = _bprint_s(p, end, " (");

    /* try to match one of the known TYPE encodings; on a match the
     * cursor is advanced past the 2 TYPE bytes */
    for (nn = 0; qTypes[nn].typeBytes != NULL; nn++) {
        if (_dnsPacket_checkBytes(packet, 2, qTypes[nn].typeBytes)) {
            typeString = qTypes[nn].typeString;
            break;
        }
    }

    if (typeString != NULL)
        p = _bprint_s(p, end, typeString);
    else {
        /* unknown type: print its numeric value instead */
        int typeCode = _dnsPacket_readInt16(packet);
        p = _bprint(p, end, "UNKNOWN-%d", typeCode);
    }

    p = _bprint_c(p, end, ')');

    /* skip CLASS */
    _dnsPacket_skip(packet, 2);
    return p;
}
737
738 /* this function assumes the packet has already been checked */
739 static char*
_dnsPacket_bprintQuery(DnsPacket * packet,char * p,char * end)740 _dnsPacket_bprintQuery( DnsPacket* packet, char* p, char* end )
741 {
742 int qdCount;
743
744 if (packet->base[2] & 0x1) {
745 p = _bprint_s(p, end, "RECURSIVE ");
746 }
747
748 _dnsPacket_skip(packet, 4);
749 qdCount = _dnsPacket_readInt16(packet);
750 _dnsPacket_skip(packet, 6);
751
752 for ( ; qdCount > 0; qdCount-- ) {
753 p = _dnsPacket_bprintQR(packet, p, end);
754 }
755 return p;
756 }
757 #endif
758
759
760 /** QUERY HASHING SUPPORT
761 **
762 ** THE FOLLOWING CODE ASSUMES THAT THE INPUT PACKET HAS ALREADY
 ** BEEN SUCCESSFULLY CHECKED.
764 **/
765
766 /* use 32-bit FNV hash function */
767 #define FNV_MULT 16777619U
768 #define FNV_BASIS 2166136261U
769
770 static unsigned
_dnsPacket_hashBytes(DnsPacket * packet,int numBytes,unsigned hash)771 _dnsPacket_hashBytes( DnsPacket* packet, int numBytes, unsigned hash )
772 {
773 const uint8_t* p = packet->cursor;
774 const uint8_t* end = packet->end;
775
776 while (numBytes > 0 && p < end) {
777 hash = hash*FNV_MULT ^ *p++;
778 }
779 packet->cursor = p;
780 return hash;
781 }
782
783
784 static unsigned
_dnsPacket_hashQName(DnsPacket * packet,unsigned hash)785 _dnsPacket_hashQName( DnsPacket* packet, unsigned hash )
786 {
787 const uint8_t* p = packet->cursor;
788 const uint8_t* end = packet->end;
789
790 for (;;) {
791 int c;
792
793 if (p >= end) { /* should not happen */
794 XLOG("%s: INTERNAL_ERROR: read-overflow !!\n", __FUNCTION__);
795 break;
796 }
797
798 c = *p++;
799
800 if (c == 0)
801 break;
802
803 if (c >= 64) {
804 XLOG("%s: INTERNAL_ERROR: malformed domain !!\n", __FUNCTION__);
805 break;
806 }
807 if (p + c >= end) {
808 XLOG("%s: INTERNAL_ERROR: simple label read-overflow !!\n",
809 __FUNCTION__);
810 break;
811 }
812 while (c > 0) {
813 hash = hash*FNV_MULT ^ *p++;
814 c -= 1;
815 }
816 }
817 packet->cursor = p;
818 return hash;
819 }
820
821 static unsigned
_dnsPacket_hashQR(DnsPacket * packet,unsigned hash)822 _dnsPacket_hashQR( DnsPacket* packet, unsigned hash )
823 {
824 int len;
825
826 hash = _dnsPacket_hashQName(packet, hash);
827 hash = _dnsPacket_hashBytes(packet, 4, hash); /* TYPE and CLASS */
828 return hash;
829 }
830
831 static unsigned
_dnsPacket_hashQuery(DnsPacket * packet)832 _dnsPacket_hashQuery( DnsPacket* packet )
833 {
834 unsigned hash = FNV_BASIS;
835 int count;
836 _dnsPacket_rewind(packet);
837
838 /* we ignore the TC bit for reasons explained in
839 * _dnsPacket_checkQuery().
840 *
841 * however we hash the RD bit to differentiate
842 * between answers for recursive and non-recursive
843 * queries.
844 */
845 hash = hash*FNV_MULT ^ (packet->base[2] & 1);
846
847 /* assume: other flags are 0 */
848 _dnsPacket_skip(packet, 4);
849
850 /* read QDCOUNT */
851 count = _dnsPacket_readInt16(packet);
852
853 /* assume: ANcount, NScount, ARcount are 0 */
854 _dnsPacket_skip(packet, 6);
855
856 /* hash QDCOUNT QRs */
857 for ( ; count > 0; count-- )
858 hash = _dnsPacket_hashQR(packet, hash);
859
860 return hash;
861 }
862
863
864 /** QUERY COMPARISON
865 **
866 ** THE FOLLOWING CODE ASSUMES THAT THE INPUT PACKETS HAVE ALREADY
 ** BEEN SUCCESSFULLY CHECKED.
868 **/
869
/* compare the domain-name encodings at both packets' cursors; returns 1
 * (and advances both cursors past the name) when they are equal, and 0
 * when they differ or either one is malformed. assumes both packets
 * were already validated. */
static int
_dnsPacket_isEqualDomainName( DnsPacket* pack1, DnsPacket* pack2 )
{
    const uint8_t* p1   = pack1->cursor;
    const uint8_t* end1 = pack1->end;
    const uint8_t* p2   = pack2->cursor;
    const uint8_t* end2 = pack2->end;

    for (;;) {
        int c1, c2;

        if (p1 >= end1 || p2 >= end2) {
            XLOG("%s: INTERNAL_ERROR: read-overflow !!\n", __FUNCTION__);
            break;
        }
        /* compare the label length bytes */
        c1 = *p1++;
        c2 = *p2++;
        if (c1 != c2)
            break;

        /* a zero length is the root label: both names ended together */
        if (c1 == 0) {
            pack1->cursor = p1;
            pack2->cursor = p2;
            return 1;
        }
        /* we don't expect label compression in QNAMEs */
        if (c1 >= 64) {
            XLOG("%s: INTERNAL_ERROR: malformed domain !!\n", __FUNCTION__);
            break;
        }
        if ((p1+c1 > end1) || (p2+c1 > end2)) {
            XLOG("%s: INTERNAL_ERROR: simple label read-overflow !!\n",
                    __FUNCTION__);
            break;
        }
        /* compare the label bytes themselves */
        if (memcmp(p1, p2, c1) != 0)
            break;
        p1 += c1;
        p2 += c1;
        /* we rely on the bound checks at the start of the loop */
    }
    /* not the same, or one is malformed */
    XLOG("different DN");
    return 0;
}
914
915 static int
_dnsPacket_isEqualBytes(DnsPacket * pack1,DnsPacket * pack2,int numBytes)916 _dnsPacket_isEqualBytes( DnsPacket* pack1, DnsPacket* pack2, int numBytes )
917 {
918 const uint8_t* p1 = pack1->cursor;
919 const uint8_t* p2 = pack2->cursor;
920
921 if ( p1 + numBytes > pack1->end || p2 + numBytes > pack2->end )
922 return 0;
923
924 if ( memcmp(p1, p2, numBytes) != 0 )
925 return 0;
926
927 pack1->cursor += numBytes;
928 pack2->cursor += numBytes;
929 return 1;
930 }
931
932 static int
_dnsPacket_isEqualQR(DnsPacket * pack1,DnsPacket * pack2)933 _dnsPacket_isEqualQR( DnsPacket* pack1, DnsPacket* pack2 )
934 {
935 /* compare domain name encoding + TYPE + CLASS */
936 if ( !_dnsPacket_isEqualDomainName(pack1, pack2) ||
937 !_dnsPacket_isEqualBytes(pack1, pack2, 2+2) )
938 return 0;
939
940 return 1;
941 }
942
943 static int
_dnsPacket_isEqualQuery(DnsPacket * pack1,DnsPacket * pack2)944 _dnsPacket_isEqualQuery( DnsPacket* pack1, DnsPacket* pack2 )
945 {
946 int count1, count2;
947
948 /* compare the headers, ignore most fields */
949 _dnsPacket_rewind(pack1);
950 _dnsPacket_rewind(pack2);
951
952 /* compare RD, ignore TC, see comment in _dnsPacket_checkQuery */
953 if ((pack1->base[2] & 1) != (pack2->base[2] & 1)) {
954 XLOG("different RD");
955 return 0;
956 }
957
958 /* assume: other flags are all 0 */
959 _dnsPacket_skip(pack1, 4);
960 _dnsPacket_skip(pack2, 4);
961
962 /* compare QDCOUNT */
963 count1 = _dnsPacket_readInt16(pack1);
964 count2 = _dnsPacket_readInt16(pack2);
965 if (count1 != count2 || count1 < 0) {
966 XLOG("different QDCOUNT");
967 return 0;
968 }
969
970 /* assume: ANcount, NScount and ARcount are all 0 */
971 _dnsPacket_skip(pack1, 6);
972 _dnsPacket_skip(pack2, 6);
973
974 /* compare the QDCOUNT QRs */
975 for ( ; count1 > 0; count1-- ) {
976 if (!_dnsPacket_isEqualQR(pack1, pack2)) {
977 XLOG("different QR");
978 return 0;
979 }
980 }
981 return 1;
982 }
983
984 /****************************************************************************/
985 /****************************************************************************/
986 /***** *****/
987 /***** *****/
988 /***** *****/
989 /****************************************************************************/
990 /****************************************************************************/
991
992 /* cache entry. for simplicity, 'hash' and 'hlink' are inlined in this
993 * structure though they are conceptually part of the hash table.
994 *
995 * similarly, mru_next and mru_prev are part of the global MRU list
996 */
typedef struct Entry {
    unsigned int     hash;      /* hash of the query, see entry_hash() */
    struct Entry*    hlink;     /* next in hash-bucket collision chain */
    struct Entry*    mru_prev;  /* previous entry in the global MRU list */
    struct Entry*    mru_next;  /* next entry in the global MRU list */

    const uint8_t*   query;     /* raw query packet (stored right after the
                                 * struct, see entry_alloc) */
    int              querylen;
    const uint8_t*   answer;    /* raw answer packet (stored after the query) */
    int              answerlen;
    time_t           expires;   /* time_t when the entry isn't valid any more */
    int              id;        /* for debugging purpose */
} Entry;
1010
1011 /**
1012 * Parse the answer records and find the smallest
1013 * TTL among the answer records.
1014 *
1015 * The returned TTL is the number of seconds to
1016 * keep the answer in the cache.
1017 *
1018 * In case of parse error zero (0) is returned which
1019 * indicates that the answer shall not be cached.
1020 */
1021 static u_long
answer_getTTL(const void * answer,int answerlen)1022 answer_getTTL(const void* answer, int answerlen)
1023 {
1024 ns_msg handle;
1025 int ancount, n;
1026 u_long result, ttl;
1027 ns_rr rr;
1028
1029 result = 0;
1030 if (ns_initparse(answer, answerlen, &handle) >= 0) {
1031 // get number of answer records
1032 ancount = ns_msg_count(handle, ns_s_an);
1033 for (n = 0; n < ancount; n++) {
1034 if (ns_parserr(&handle, ns_s_an, n, &rr) == 0) {
1035 ttl = ns_rr_ttl(rr);
1036 if (n == 0 || ttl < result) {
1037 result = ttl;
1038 }
1039 } else {
1040 XLOG("ns_parserr failed ancount no = %d. errno = %s\n", n, strerror(errno));
1041 }
1042 }
1043 } else {
1044 XLOG("ns_parserr failed. %s\n", strerror(errno));
1045 }
1046
1047 XLOG("TTL = %d\n", result);
1048
1049 return result;
1050 }
1051
1052 static void
entry_free(Entry * e)1053 entry_free( Entry* e )
1054 {
1055 /* everything is allocated in a single memory block */
1056 if (e) {
1057 free(e);
1058 }
1059 }
1060
1061 static __inline__ void
entry_mru_remove(Entry * e)1062 entry_mru_remove( Entry* e )
1063 {
1064 e->mru_prev->mru_next = e->mru_next;
1065 e->mru_next->mru_prev = e->mru_prev;
1066 }
1067
1068 static __inline__ void
entry_mru_add(Entry * e,Entry * list)1069 entry_mru_add( Entry* e, Entry* list )
1070 {
1071 Entry* first = list->mru_next;
1072
1073 e->mru_next = first;
1074 e->mru_prev = list;
1075
1076 list->mru_next = e;
1077 first->mru_prev = e;
1078 }
1079
1080 /* compute the hash of a given entry, this is a hash of most
1081 * data in the query (key) */
1082 static unsigned
entry_hash(const Entry * e)1083 entry_hash( const Entry* e )
1084 {
1085 DnsPacket pack[1];
1086
1087 _dnsPacket_init(pack, e->query, e->querylen);
1088 return _dnsPacket_hashQuery(pack);
1089 }
1090
1091 /* initialize an Entry as a search key, this also checks the input query packet
1092 * returns 1 on success, or 0 in case of unsupported/malformed data */
1093 static int
entry_init_key(Entry * e,const void * query,int querylen)1094 entry_init_key( Entry* e, const void* query, int querylen )
1095 {
1096 DnsPacket pack[1];
1097
1098 memset(e, 0, sizeof(*e));
1099
1100 e->query = query;
1101 e->querylen = querylen;
1102 e->hash = entry_hash(e);
1103
1104 _dnsPacket_init(pack, query, querylen);
1105
1106 return _dnsPacket_checkQuery(pack);
1107 }
1108
1109 /* allocate a new entry as a cache node */
1110 static Entry*
entry_alloc(const Entry * init,const void * answer,int answerlen)1111 entry_alloc( const Entry* init, const void* answer, int answerlen )
1112 {
1113 Entry* e;
1114 int size;
1115
1116 size = sizeof(*e) + init->querylen + answerlen;
1117 e = calloc(size, 1);
1118 if (e == NULL)
1119 return e;
1120
1121 e->hash = init->hash;
1122 e->query = (const uint8_t*)(e+1);
1123 e->querylen = init->querylen;
1124
1125 memcpy( (char*)e->query, init->query, e->querylen );
1126
1127 e->answer = e->query + e->querylen;
1128 e->answerlen = answerlen;
1129
1130 memcpy( (char*)e->answer, answer, e->answerlen );
1131
1132 return e;
1133 }
1134
1135 static int
entry_equals(const Entry * e1,const Entry * e2)1136 entry_equals( const Entry* e1, const Entry* e2 )
1137 {
1138 DnsPacket pack1[1], pack2[1];
1139
1140 if (e1->querylen != e2->querylen) {
1141 return 0;
1142 }
1143 _dnsPacket_init(pack1, e1->query, e1->querylen);
1144 _dnsPacket_init(pack2, e2->query, e2->querylen);
1145
1146 return _dnsPacket_isEqualQuery(pack1, pack2);
1147 }
1148
1149 /****************************************************************************/
1150 /****************************************************************************/
1151 /***** *****/
1152 /***** *****/
1153 /***** *****/
1154 /****************************************************************************/
1155 /****************************************************************************/
1156
1157 /* We use a simple hash table with external collision lists
1158 * for simplicity, the hash-table fields 'hash' and 'hlink' are
1159 * inlined in the Entry structure.
1160 */
1161
/* The cache proper: a fixed-size hash table with external collision
 * chains, plus an MRU list used for eviction when the table is full. */
typedef struct resolv_cache {
    int max_entries;        /* hash-table capacity (see _res_cache_get_max_entries) */
    int num_entries;        /* number of entries currently cached */
    Entry mru_list;         /* sentinel node of the doubly-linked MRU list */
    pthread_mutex_t lock;   /* protects every field of this cache */
    unsigned generation;    /* cache is flushed when this differs (see _resolv_cache_reset) */
    int last_id;            /* id assigned to the most recently added entry */
    Entry* entries;         /* bucket array, max_entries slots long */
} Cache;
1171
/* Per-interface bookkeeping record, linked into _res_cache_list: owns
 * the interface's cache, its address, and its configured nameservers. */
typedef struct resolv_cache_info {
    char ifname[IF_NAMESIZE + 1];           /* interface name, always NUL-terminated */
    struct in_addr ifaddr;                  /* interface IPv4 address */
    Cache* cache;                           /* DNS answer cache for this interface */
    struct resolv_cache_info* next;         /* next record in the global list */
    char* nameservers[MAXNS +1];            /* strdup'ed nameserver address strings */
    struct addrinfo* nsaddrinfo[MAXNS + 1]; /* resolved server addresses from getaddrinfo */
} CacheInfo;
1180
/* true iff a hash-table slot holds a live entry, i.e. it is neither
 * empty nor marked with the HTABLE_DELETED tombstone */
#define HTABLE_VALID(x) ((x) != NULL && (x) != HTABLE_DELETED)
1182
1183 static void
_cache_flush_locked(Cache * cache)1184 _cache_flush_locked( Cache* cache )
1185 {
1186 int nn;
1187 time_t now = _time_now();
1188
1189 for (nn = 0; nn < cache->max_entries; nn++)
1190 {
1191 Entry** pnode = (Entry**) &cache->entries[nn];
1192
1193 while (*pnode != NULL) {
1194 Entry* node = *pnode;
1195 *pnode = node->hlink;
1196 entry_free(node);
1197 }
1198 }
1199
1200 cache->mru_list.mru_next = cache->mru_list.mru_prev = &cache->mru_list;
1201 cache->num_entries = 0;
1202 cache->last_id = 0;
1203
1204 XLOG("*************************\n"
1205 "*** DNS CACHE FLUSHED ***\n"
1206 "*************************");
1207 }
1208
1209 /* Return max number of entries allowed in the cache,
1210 * i.e. cache size. The cache size is either defined
1211 * by system property ro.net.dns_cache_size or by
1212 * CONFIG_MAX_ENTRIES if system property not set
1213 * or set to invalid value. */
1214 static int
_res_cache_get_max_entries(void)1215 _res_cache_get_max_entries( void )
1216 {
1217 int result = -1;
1218 char cache_size[PROP_VALUE_MAX];
1219
1220 if (__system_property_get(DNS_CACHE_SIZE_PROP_NAME, cache_size) > 0) {
1221 result = atoi(cache_size);
1222 }
1223
1224 // ro.net.dns_cache_size not set or set to negative value
1225 if (result <= 0) {
1226 result = CONFIG_MAX_ENTRIES;
1227 }
1228
1229 XLOG("cache size: %d", result);
1230 return result;
1231 }
1232
1233 static struct resolv_cache*
_resolv_cache_create(void)1234 _resolv_cache_create( void )
1235 {
1236 struct resolv_cache* cache;
1237
1238 cache = calloc(sizeof(*cache), 1);
1239 if (cache) {
1240 cache->max_entries = _res_cache_get_max_entries();
1241 cache->entries = calloc(sizeof(*cache->entries), cache->max_entries);
1242 if (cache->entries) {
1243 cache->generation = ~0U;
1244 pthread_mutex_init( &cache->lock, NULL );
1245 cache->mru_list.mru_prev = cache->mru_list.mru_next = &cache->mru_list;
1246 XLOG("%s: cache created\n", __FUNCTION__);
1247 } else {
1248 free(cache);
1249 cache = NULL;
1250 }
1251 }
1252 return cache;
1253 }
1254
1255
1256 #if DEBUG
/* DEBUG only: pretty-print a query packet through XLOG. */
static void
_dump_query( const uint8_t* query, int querylen )
{
    char temp[256], *p=temp, *end=p+sizeof(temp);
    DnsPacket pack[1];

    _dnsPacket_init(pack, query, querylen);
    /* NOTE(review): the updated 'p' is unused afterwards; _dnsPacket_bprintQuery
     * presumably NUL-terminates 'temp' -- confirm in its definition */
    p = _dnsPacket_bprintQuery(pack, p, end);
    XLOG("QUERY: %s", temp);
}
1267
/* DEBUG only: log the ids of all cached entries in MRU order. */
static void
_cache_dump_mru( Cache* cache )
{
    char temp[512], *p=temp, *end=p+sizeof(temp);
    Entry* e;

    p = _bprint(temp, end, "MRU LIST (%2d): ", cache->num_entries);
    /* walk from most- to least-recently-used, stopping at the sentinel */
    for (e = cache->mru_list.mru_next; e != &cache->mru_list; e = e->mru_next)
        p = _bprint(p, end, " %d", e->id);

    XLOG("%s", temp);
}
1280
1281 static void
_dump_answer(const void * answer,int answerlen)1282 _dump_answer(const void* answer, int answerlen)
1283 {
1284 res_state statep;
1285 FILE* fp;
1286 char* buf;
1287 int fileLen;
1288
1289 fp = fopen("/data/reslog.txt", "w+");
1290 if (fp != NULL) {
1291 statep = __res_get_state();
1292
1293 res_pquery(statep, answer, answerlen, fp);
1294
1295 //Get file length
1296 fseek(fp, 0, SEEK_END);
1297 fileLen=ftell(fp);
1298 fseek(fp, 0, SEEK_SET);
1299 buf = (char *)malloc(fileLen+1);
1300 if (buf != NULL) {
1301 //Read file contents into buffer
1302 fread(buf, fileLen, 1, fp);
1303 XLOG("%s\n", buf);
1304 free(buf);
1305 }
1306 fclose(fp);
1307 remove("/data/reslog.txt");
1308 }
1309 else {
1310 XLOG("_dump_answer: can't open file\n");
1311 }
1312 }
1313 #endif
1314
/* In DEBUG builds these dump the raw query/answer packets via the
 * helpers above; in release builds they compile to nothing. */
#if DEBUG
#  define XLOG_QUERY(q,len)   _dump_query((q), (len))
#  define XLOG_ANSWER(a, len) _dump_answer((a), (len))
#else
#  define XLOG_QUERY(q,len)   ((void)0)
#  define XLOG_ANSWER(a,len)  ((void)0)
#endif
1322
1323 /* This function tries to find a key within the hash table
1324 * In case of success, it will return a *pointer* to the hashed key.
1325 * In case of failure, it will return a *pointer* to NULL
1326 *
1327 * So, the caller must check '*result' to check for success/failure.
1328 *
1329 * The main idea is that the result can later be used directly in
1330 * calls to _resolv_cache_add or _resolv_cache_remove as the 'lookup'
1331 * parameter. This makes the code simpler and avoids re-searching
1332 * for the key position in the htable.
1333 *
1334 * The result of a lookup_p is only valid until you alter the hash
1335 * table.
1336 */
1337 static Entry**
_cache_lookup_p(Cache * cache,Entry * key)1338 _cache_lookup_p( Cache* cache,
1339 Entry* key )
1340 {
1341 int index = key->hash % cache->max_entries;
1342 Entry** pnode = (Entry**) &cache->entries[ index ];
1343
1344 while (*pnode != NULL) {
1345 Entry* node = *pnode;
1346
1347 if (node == NULL)
1348 break;
1349
1350 if (node->hash == key->hash && entry_equals(node, key))
1351 break;
1352
1353 pnode = &node->hlink;
1354 }
1355 return pnode;
1356 }
1357
1358 /* Add a new entry to the hash table. 'lookup' must be the
1359 * result of an immediate previous failed _lookup_p() call
1360 * (i.e. with *lookup == NULL), and 'e' is the pointer to the
1361 * newly created entry
1362 */
1363 static void
_cache_add_p(Cache * cache,Entry ** lookup,Entry * e)1364 _cache_add_p( Cache* cache,
1365 Entry** lookup,
1366 Entry* e )
1367 {
1368 *lookup = e;
1369 e->id = ++cache->last_id;
1370 entry_mru_add(e, &cache->mru_list);
1371 cache->num_entries += 1;
1372
1373 XLOG("%s: entry %d added (count=%d)", __FUNCTION__,
1374 e->id, cache->num_entries);
1375 }
1376
/* Remove an existing entry from the hash table,
 * 'lookup' must be the result of an immediate previous
 * and successful _lookup_p() call.
 */
1381 static void
_cache_remove_p(Cache * cache,Entry ** lookup)1382 _cache_remove_p( Cache* cache,
1383 Entry** lookup )
1384 {
1385 Entry* e = *lookup;
1386
1387 XLOG("%s: entry %d removed (count=%d)", __FUNCTION__,
1388 e->id, cache->num_entries-1);
1389
1390 entry_mru_remove(e);
1391 *lookup = e->hlink;
1392 entry_free(e);
1393 cache->num_entries -= 1;
1394 }
1395
1396 /* Remove the oldest entry from the hash table.
1397 */
1398 static void
_cache_remove_oldest(Cache * cache)1399 _cache_remove_oldest( Cache* cache )
1400 {
1401 Entry* oldest = cache->mru_list.mru_prev;
1402 Entry** lookup = _cache_lookup_p(cache, oldest);
1403
1404 if (*lookup == NULL) { /* should not happen */
1405 XLOG("%s: OLDEST NOT IN HTABLE ?", __FUNCTION__);
1406 return;
1407 }
1408 if (DEBUG) {
1409 XLOG("Cache full - removing oldest");
1410 XLOG_QUERY(oldest->query, oldest->querylen);
1411 }
1412 _cache_remove_p(cache, lookup);
1413 }
1414
1415
1416 ResolvCacheStatus
_resolv_cache_lookup(struct resolv_cache * cache,const void * query,int querylen,void * answer,int answersize,int * answerlen)1417 _resolv_cache_lookup( struct resolv_cache* cache,
1418 const void* query,
1419 int querylen,
1420 void* answer,
1421 int answersize,
1422 int *answerlen )
1423 {
1424 DnsPacket pack[1];
1425 Entry key[1];
1426 int index;
1427 Entry** lookup;
1428 Entry* e;
1429 time_t now;
1430
1431 ResolvCacheStatus result = RESOLV_CACHE_NOTFOUND;
1432
1433 XLOG("%s: lookup", __FUNCTION__);
1434 XLOG_QUERY(query, querylen);
1435
1436 /* we don't cache malformed queries */
1437 if (!entry_init_key(key, query, querylen)) {
1438 XLOG("%s: unsupported query", __FUNCTION__);
1439 return RESOLV_CACHE_UNSUPPORTED;
1440 }
1441 /* lookup cache */
1442 pthread_mutex_lock( &cache->lock );
1443
1444 /* see the description of _lookup_p to understand this.
1445 * the function always return a non-NULL pointer.
1446 */
1447 lookup = _cache_lookup_p(cache, key);
1448 e = *lookup;
1449
1450 if (e == NULL) {
1451 XLOG( "NOT IN CACHE");
1452 goto Exit;
1453 }
1454
1455 now = _time_now();
1456
1457 /* remove stale entries here */
1458 if (now >= e->expires) {
1459 XLOG( " NOT IN CACHE (STALE ENTRY %p DISCARDED)", *lookup );
1460 XLOG_QUERY(e->query, e->querylen);
1461 _cache_remove_p(cache, lookup);
1462 goto Exit;
1463 }
1464
1465 *answerlen = e->answerlen;
1466 if (e->answerlen > answersize) {
1467 /* NOTE: we return UNSUPPORTED if the answer buffer is too short */
1468 result = RESOLV_CACHE_UNSUPPORTED;
1469 XLOG(" ANSWER TOO LONG");
1470 goto Exit;
1471 }
1472
1473 memcpy( answer, e->answer, e->answerlen );
1474
1475 /* bump up this entry to the top of the MRU list */
1476 if (e != cache->mru_list.mru_next) {
1477 entry_mru_remove( e );
1478 entry_mru_add( e, &cache->mru_list );
1479 }
1480
1481 XLOG( "FOUND IN CACHE entry=%p", e );
1482 result = RESOLV_CACHE_FOUND;
1483
1484 Exit:
1485 pthread_mutex_unlock( &cache->lock );
1486 return result;
1487 }
1488
1489
/* Insert a (query, answer) pair into the cache.  The entry's lifetime
 * is the smallest TTL among the answer records (see answer_getTTL);
 * answers with TTL 0 are not cached.  If the query is already cached
 * (including after an eviction re-check) the add is silently ignored. */
void
_resolv_cache_add( struct resolv_cache*  cache,
                   const void*           query,
                   int                   querylen,
                   const void*           answer,
                   int                   answerlen )
{
    Entry key[1];
    Entry* e;
    Entry** lookup;
    u_long ttl;

    /* don't assume that the query has already been cached
     */
    if (!entry_init_key( key, query, querylen )) {
        XLOG( "%s: passed invalid query ?", __FUNCTION__);
        return;
    }

    pthread_mutex_lock( &cache->lock );

    XLOG( "%s: query:", __FUNCTION__ );
    XLOG_QUERY(query,querylen);
    XLOG_ANSWER(answer, answerlen);
#if DEBUG_DATA
    XLOG( "answer:");
    XLOG_BYTES(answer,answerlen);
#endif

    lookup = _cache_lookup_p(cache, key);
    e = *lookup;

    if (e != NULL) { /* should not happen */
        XLOG("%s: ALREADY IN CACHE (%p) ? IGNORING ADD",
             __FUNCTION__, e);
        goto Exit;
    }

    /* cache full: evict the LRU entry, then re-run the lookup because
     * the eviction may have restructured the bucket chain we hold a
     * pointer into */
    if (cache->num_entries >= cache->max_entries) {
        _cache_remove_oldest(cache);
        /* need to lookup again */
        lookup = _cache_lookup_p(cache, key);
        e = *lookup;
        if (e != NULL) {
            XLOG("%s: ALREADY IN CACHE (%p) ? IGNORING ADD",
                 __FUNCTION__, e);
            goto Exit;
        }
    }

    /* only cache answers that carry a positive TTL */
    ttl = answer_getTTL(answer, answerlen);
    if (ttl > 0) {
        e = entry_alloc(key, answer, answerlen);
        if (e != NULL) {
            e->expires = ttl + _time_now();
            _cache_add_p(cache, lookup, e);
        }
    }
#if DEBUG
    _cache_dump_mru(cache);
#endif
Exit:
    pthread_mutex_unlock( &cache->lock );
}
1554
1555 /****************************************************************************/
1556 /****************************************************************************/
1557 /***** *****/
1558 /***** *****/
1559 /***** *****/
1560 /****************************************************************************/
1561 /****************************************************************************/
1562
/* one-time initialization guard for the globals below (see _res_cache_init) */
static pthread_once_t _res_cache_once;

// Head of the list of caches.  Protected by _res_cache_list_lock.
static struct resolv_cache_info _res_cache_list;

// name of the current default interface
static char _res_default_ifname[IF_NAMESIZE + 1];

// lock protecting everything in the _resolve_cache_info structs (next ptr, etc)
static pthread_mutex_t _res_cache_list_lock;
1573
1574
/* lookup the default interface name */
static char *_get_default_iface_locked();
/* insert resolv_cache_info into the list of resolv_cache_infos */
static void _insert_cache_info_locked(struct resolv_cache_info* cache_info);
/* creates a resolv_cache_info */
static struct resolv_cache_info* _create_cache_info( void );
/* gets cache associated with an interface name, or NULL if none exists */
static struct resolv_cache* _find_named_cache_locked(const char* ifname);
/* gets a resolv_cache_info associated with an interface name, or NULL if not found */
static struct resolv_cache_info* _find_cache_info_locked(const char* ifname);
/* free dns name server list of a resolv_cache_info structure */
static void _free_nameservers(struct resolv_cache_info* cache_info);
/* look up the named cache, and creates one if needed */
static struct resolv_cache* _get_res_cache_for_iface_locked(const char* ifname);
/* empty the named cache */
static void _flush_cache_for_iface_locked(const char* ifname);
/* empty the nameservers set for the named cache */
static void _free_nameservers_locked(struct resolv_cache_info* cache_info);
/* lookup the nameserver for the named interface */
static int _get_nameserver_locked(const char* ifname, int n, char* addr, int addrLen);
/* lookup the addr of the nameserver for the named interface */
static struct addrinfo* _get_nameserver_addr_locked(const char* ifname, int n);
/* lookup the interface's address */
static struct in_addr* _get_addr_locked(const char * ifname);
1599
1600
1601
/* One-time initializer (run via pthread_once) for the global cache
 * list, the default interface name, and their lock.
 * NOTE(review): when CONFIG_ENV disables the cache we return before
 * pthread_mutex_init; callers appear to rely on the statics' zero
 * initialization in that case -- confirm this is intended. */
static void
_res_cache_init(void)
{
    const char* env = getenv(CONFIG_ENV);

    if (env && atoi(env) == 0) {
        /* the cache is disabled */
        return;
    }

    memset(&_res_default_ifname, 0, sizeof(_res_default_ifname));
    memset(&_res_cache_list, 0, sizeof(_res_cache_list));
    pthread_mutex_init(&_res_cache_list_lock, NULL);
}
1616
/* Return the cache for the current default interface, creating it if
 * needed.  If no default interface has been set yet, fall back to the
 * first interface that already has a cache-info record. */
struct resolv_cache*
__get_res_cache(void)
{
    struct resolv_cache *cache;

    pthread_once(&_res_cache_once, _res_cache_init);

    pthread_mutex_lock(&_res_cache_list_lock);

    char* ifname = _get_default_iface_locked();

    // if default interface not set then use the first cache
    // associated with an interface as the default one.
    if (ifname[0] == '\0') {
        struct resolv_cache_info* cache_info = _res_cache_list.next;
        while (cache_info) {
            if (cache_info->ifname[0] != '\0') {
                ifname = cache_info->ifname;
                break;
            }

            cache_info = cache_info->next;
        }
    }
    cache = _get_res_cache_for_iface_locked(ifname);

    pthread_mutex_unlock(&_res_cache_list_lock);
    XLOG("_get_res_cache. default_ifname = %s\n", ifname);
    return cache;
}
1647
1648 static struct resolv_cache*
_get_res_cache_for_iface_locked(const char * ifname)1649 _get_res_cache_for_iface_locked(const char* ifname)
1650 {
1651 if (ifname == NULL)
1652 return NULL;
1653
1654 struct resolv_cache* cache = _find_named_cache_locked(ifname);
1655 if (!cache) {
1656 struct resolv_cache_info* cache_info = _create_cache_info();
1657 if (cache_info) {
1658 cache = _resolv_cache_create();
1659 if (cache) {
1660 int len = sizeof(cache_info->ifname);
1661 cache_info->cache = cache;
1662 strncpy(cache_info->ifname, ifname, len - 1);
1663 cache_info->ifname[len - 1] = '\0';
1664
1665 _insert_cache_info_locked(cache_info);
1666 } else {
1667 free(cache_info);
1668 }
1669 }
1670 }
1671 return cache;
1672 }
1673
/* Flush the default interface's cache when 'generation' differs from
 * the cache's current generation, then record the new generation. */
void
_resolv_cache_reset(unsigned generation)
{
    XLOG("%s: generation=%d", __FUNCTION__, generation);

    pthread_once(&_res_cache_once, _res_cache_init);
    pthread_mutex_lock(&_res_cache_list_lock);

    char* ifname = _get_default_iface_locked();
    // if default interface not set then use the first cache
    // associated with an interface as the default one.
    // Note: Copied the code from __get_res_cache since this
    // method will be deleted/obsolete when cache per interface
    // implemented all over
    if (ifname[0] == '\0') {
        struct resolv_cache_info* cache_info = _res_cache_list.next;
        while (cache_info) {
            if (cache_info->ifname[0] != '\0') {
                ifname = cache_info->ifname;
                break;
            }

            cache_info = cache_info->next;
        }
    }
    struct resolv_cache* cache = _get_res_cache_for_iface_locked(ifname);

    if (cache != NULL) {
        /* per-cache lock still required even while holding the list lock */
        pthread_mutex_lock( &cache->lock );
        if (cache->generation != generation) {
            _cache_flush_locked(cache);
            cache->generation = generation;
        }
        pthread_mutex_unlock( &cache->lock );
    }

    pthread_mutex_unlock(&_res_cache_list_lock);
}
1712
1713 void
_resolv_flush_cache_for_default_iface(void)1714 _resolv_flush_cache_for_default_iface(void)
1715 {
1716 char* ifname;
1717
1718 pthread_once(&_res_cache_once, _res_cache_init);
1719 pthread_mutex_lock(&_res_cache_list_lock);
1720
1721 ifname = _get_default_iface_locked();
1722 _flush_cache_for_iface_locked(ifname);
1723
1724 pthread_mutex_unlock(&_res_cache_list_lock);
1725 }
1726
1727 void
_resolv_flush_cache_for_iface(const char * ifname)1728 _resolv_flush_cache_for_iface(const char* ifname)
1729 {
1730 pthread_once(&_res_cache_once, _res_cache_init);
1731 pthread_mutex_lock(&_res_cache_list_lock);
1732
1733 _flush_cache_for_iface_locked(ifname);
1734
1735 pthread_mutex_unlock(&_res_cache_list_lock);
1736 }
1737
1738 static void
_flush_cache_for_iface_locked(const char * ifname)1739 _flush_cache_for_iface_locked(const char* ifname)
1740 {
1741 struct resolv_cache* cache = _find_named_cache_locked(ifname);
1742 if (cache) {
1743 pthread_mutex_lock(&cache->lock);
1744 _cache_flush_locked(cache);
1745 pthread_mutex_unlock(&cache->lock);
1746 }
1747 }
1748
1749 static struct resolv_cache_info*
_create_cache_info(void)1750 _create_cache_info(void)
1751 {
1752 struct resolv_cache_info* cache_info;
1753
1754 cache_info = calloc(sizeof(*cache_info), 1);
1755 return cache_info;
1756 }
1757
1758 static void
_insert_cache_info_locked(struct resolv_cache_info * cache_info)1759 _insert_cache_info_locked(struct resolv_cache_info* cache_info)
1760 {
1761 struct resolv_cache_info* last;
1762
1763 for (last = &_res_cache_list; last->next; last = last->next);
1764
1765 last->next = cache_info;
1766
1767 }
1768
1769 static struct resolv_cache*
_find_named_cache_locked(const char * ifname)1770 _find_named_cache_locked(const char* ifname) {
1771
1772 struct resolv_cache_info* info = _find_cache_info_locked(ifname);
1773
1774 if (info != NULL) return info->cache;
1775
1776 return NULL;
1777 }
1778
1779 static struct resolv_cache_info*
_find_cache_info_locked(const char * ifname)1780 _find_cache_info_locked(const char* ifname)
1781 {
1782 if (ifname == NULL)
1783 return NULL;
1784
1785 struct resolv_cache_info* cache_info = _res_cache_list.next;
1786
1787 while (cache_info) {
1788 if (strcmp(cache_info->ifname, ifname) == 0) {
1789 break;
1790 }
1791
1792 cache_info = cache_info->next;
1793 }
1794 return cache_info;
1795 }
1796
1797 static char*
_get_default_iface_locked(void)1798 _get_default_iface_locked(void)
1799 {
1800 char* iface = _res_default_ifname;
1801
1802 return iface;
1803 }
1804
1805 void
_resolv_set_default_iface(const char * ifname)1806 _resolv_set_default_iface(const char* ifname)
1807 {
1808 XLOG("_resolv_set_default_if ifname %s\n",ifname);
1809
1810 pthread_once(&_res_cache_once, _res_cache_init);
1811 pthread_mutex_lock(&_res_cache_list_lock);
1812
1813 int size = sizeof(_res_default_ifname);
1814 memset(_res_default_ifname, 0, size);
1815 strncpy(_res_default_ifname, ifname, size - 1);
1816 _res_default_ifname[size - 1] = '\0';
1817
1818 pthread_mutex_unlock(&_res_cache_list_lock);
1819 }
1820
/* Configure the nameserver list for 'ifname', creating its cache if
 * necessary.  Any previously configured servers are released first.
 * At most MAXNS servers are kept; entries that fail numeric-host
 * resolution via getaddrinfo are skipped. */
void
_resolv_set_nameservers_for_iface(const char* ifname, char** servers, int numservers)
{
    int i, rt, index;
    struct addrinfo hints;
    char sbuf[NI_MAXSERV];

    pthread_once(&_res_cache_once, _res_cache_init);

    pthread_mutex_lock(&_res_cache_list_lock);
    // creates the cache if not created
    _get_res_cache_for_iface_locked(ifname);

    struct resolv_cache_info* cache_info = _find_cache_info_locked(ifname);

    if (cache_info != NULL) {
        // free current before adding new
        _free_nameservers_locked(cache_info);

        memset(&hints, 0, sizeof(hints));
        hints.ai_family = PF_UNSPEC;
        hints.ai_socktype = SOCK_DGRAM; /*dummy*/
        hints.ai_flags = AI_NUMERICHOST;
        /* bounded formatting of the port string (was unbounded sprintf) */
        snprintf(sbuf, sizeof(sbuf), "%u", NAMESERVER_PORT);

        index = 0;
        for (i = 0; i < numservers && i < MAXNS; i++) {
            rt = getaddrinfo(servers[i], sbuf, &hints, &cache_info->nsaddrinfo[index]);
            if (rt == 0) {
                /* keep our own copy of the address string */
                cache_info->nameservers[index] = strdup(servers[i]);
                index++;
            } else {
                cache_info->nsaddrinfo[index] = NULL;
            }
        }
    }
    pthread_mutex_unlock(&_res_cache_list_lock);
}
1859
1860 static void
_free_nameservers_locked(struct resolv_cache_info * cache_info)1861 _free_nameservers_locked(struct resolv_cache_info* cache_info)
1862 {
1863 int i;
1864 for (i = 0; i <= MAXNS; i++) {
1865 free(cache_info->nameservers[i]);
1866 cache_info->nameservers[i] = NULL;
1867 if (cache_info->nsaddrinfo[i] != NULL) {
1868 freeaddrinfo(cache_info->nsaddrinfo[i]);
1869 cache_info->nsaddrinfo[i] = NULL;
1870 }
1871 }
1872 }
1873
1874 int
_resolv_cache_get_nameserver(int n,char * addr,int addrLen)1875 _resolv_cache_get_nameserver(int n, char* addr, int addrLen)
1876 {
1877 char *ifname;
1878 int result = 0;
1879
1880 pthread_once(&_res_cache_once, _res_cache_init);
1881 pthread_mutex_lock(&_res_cache_list_lock);
1882
1883 ifname = _get_default_iface_locked();
1884 result = _get_nameserver_locked(ifname, n, addr, addrLen);
1885
1886 pthread_mutex_unlock(&_res_cache_list_lock);
1887 return result;
1888 }
1889
1890 static int
_get_nameserver_locked(const char * ifname,int n,char * addr,int addrLen)1891 _get_nameserver_locked(const char* ifname, int n, char* addr, int addrLen)
1892 {
1893 int len = 0;
1894 char* ns;
1895 struct resolv_cache_info* cache_info;
1896
1897 if (n < 1 || n > MAXNS || !addr)
1898 return 0;
1899
1900 cache_info = _find_cache_info_locked(ifname);
1901 if (cache_info) {
1902 ns = cache_info->nameservers[n - 1];
1903 if (ns) {
1904 len = strlen(ns);
1905 if (len < addrLen) {
1906 strncpy(addr, ns, len);
1907 addr[len] = '\0';
1908 } else {
1909 len = 0;
1910 }
1911 }
1912 }
1913
1914 return len;
1915 }
1916
1917 struct addrinfo*
_cache_get_nameserver_addr(int n)1918 _cache_get_nameserver_addr(int n)
1919 {
1920 struct addrinfo *result;
1921 char* ifname;
1922
1923 pthread_once(&_res_cache_once, _res_cache_init);
1924 pthread_mutex_lock(&_res_cache_list_lock);
1925
1926 ifname = _get_default_iface_locked();
1927
1928 result = _get_nameserver_addr_locked(ifname, n);
1929 pthread_mutex_unlock(&_res_cache_list_lock);
1930 return result;
1931 }
1932
1933 static struct addrinfo*
_get_nameserver_addr_locked(const char * ifname,int n)1934 _get_nameserver_addr_locked(const char* ifname, int n)
1935 {
1936 struct addrinfo* ai = NULL;
1937 struct resolv_cache_info* cache_info;
1938
1939 if (n < 1 || n > MAXNS)
1940 return NULL;
1941
1942 cache_info = _find_cache_info_locked(ifname);
1943 if (cache_info) {
1944 ai = cache_info->nsaddrinfo[n - 1];
1945 }
1946 return ai;
1947 }
1948
1949 void
_resolv_set_addr_of_iface(const char * ifname,struct in_addr * addr)1950 _resolv_set_addr_of_iface(const char* ifname, struct in_addr* addr)
1951 {
1952 pthread_once(&_res_cache_once, _res_cache_init);
1953 pthread_mutex_lock(&_res_cache_list_lock);
1954 struct resolv_cache_info* cache_info = _find_cache_info_locked(ifname);
1955 if (cache_info) {
1956 memcpy(&cache_info->ifaddr, addr, sizeof(*addr));
1957
1958 if (DEBUG) {
1959 char* addr_s = inet_ntoa(cache_info->ifaddr);
1960 XLOG("address of interface %s is %s\n", ifname, addr_s);
1961 }
1962 }
1963 pthread_mutex_unlock(&_res_cache_list_lock);
1964 }
1965
1966 struct in_addr*
_resolv_get_addr_of_default_iface(void)1967 _resolv_get_addr_of_default_iface(void)
1968 {
1969 struct in_addr* ai = NULL;
1970 char* ifname;
1971
1972 pthread_once(&_res_cache_once, _res_cache_init);
1973 pthread_mutex_lock(&_res_cache_list_lock);
1974 ifname = _get_default_iface_locked();
1975 ai = _get_addr_locked(ifname);
1976 pthread_mutex_unlock(&_res_cache_list_lock);
1977
1978 return ai;
1979 }
1980
1981 struct in_addr*
_resolv_get_addr_of_iface(const char * ifname)1982 _resolv_get_addr_of_iface(const char* ifname)
1983 {
1984 struct in_addr* ai = NULL;
1985
1986 pthread_once(&_res_cache_once, _res_cache_init);
1987 pthread_mutex_lock(&_res_cache_list_lock);
1988 ai =_get_addr_locked(ifname);
1989 pthread_mutex_unlock(&_res_cache_list_lock);
1990 return ai;
1991 }
1992
1993 static struct in_addr*
_get_addr_locked(const char * ifname)1994 _get_addr_locked(const char * ifname)
1995 {
1996 struct resolv_cache_info* cache_info = _find_cache_info_locked(ifname);
1997 if (cache_info) {
1998 return &cache_info->ifaddr;
1999 }
2000 return NULL;
2001 }
2002