/*
Copyright (c) 2013 - 2017, The Linux Foundation. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above
      copyright notice, this list of conditions and the following
      disclaimer in the documentation and/or other materials provided
      with the distribution.
    * Neither the name of The Linux Foundation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#include "ipa_nat_drv.h"
#include "ipa_nat_drvi.h"

#ifdef USE_GLIB
#include <glib.h>
#define strlcpy g_strlcpy
#else
#ifndef FEATURE_IPA_ANDROID
static size_t strlcpy(char *dst, const char *src, size_t size)
{
    size_t i;

    if (size < 1)
        return 0;
    for (i = 0; i < (size - 1) && src[i] != '\0'; i++)
        dst[i] = src[i];
    for (; i < size; i++)
        dst[i] = '\0';
    return strlen(dst);
}
#endif
#endif

struct ipa_nat_cache ipv4_nat_cache;
pthread_mutex_t nat_mutex = PTHREAD_MUTEX_INITIALIZER;

static ipa_nat_pdn_entry pdns[IPA_MAX_PDN_NUM];

/* ------------------------------------------
    UTILITY FUNCTIONS START
--------------------------------------------*/

/**
 * UpdateSwSpecParams() - updates sw specific params
 * @rule: [in/out] nat table rule
 * @param_type: [in] which param needs to be updated
 * @value: [in] value of the param
 *
 * Update SW specific params in the passed rule.
 *
 * Returns: None
 */
void UpdateSwSpecParams(struct ipa_nat_rule *rule,
    uint8_t param_type,
    uint32_t value)
{
    uint32_t temp = rule->sw_spec_params;

    if (IPA_NAT_SW_PARAM_INDX_TBL_ENTRY_BYTE == param_type) {
        value = (value << INDX_TBL_ENTRY_SIZE_IN_BITS);
        temp &= 0x0000FFFF;
    } else {
        temp &= 0xFFFF0000;
    }

    temp = (temp | value);
    rule->sw_spec_params = temp;
    return;
}
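
/*
 * Illustrative note (not part of the original driver): sw_spec_params
 * packs two 16-bit software fields into one 32-bit word. Assuming
 * INDX_TBL_ENTRY_SIZE_IN_BITS is 16 and prev_index occupies the low
 * half, updating the index-table-entry field overwrites only the high
 * half:
 *
 *   rule->sw_spec_params = 0x1234ABCD;
 *   UpdateSwSpecParams(rule, IPA_NAT_SW_PARAM_INDX_TBL_ENTRY_BYTE, 0x0042);
 *   // -> sw_spec_params == 0x0042ABCD (prev_index 0xABCD preserved)
 */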

/**
 * Read8BitFieldValue() - reads an 8-bit field out of a 32-bit word
 * @param: [in] 32-bit word holding the field
 * @fld_type: [in] which field to read
 *
 * Returns: the field value, or 0 for an invalid field type
 */
uint8_t Read8BitFieldValue(uint32_t param,
    ipa_nat_rule_field_type fld_type)
{
    void *temp = (void *)&param;

    switch (fld_type) {

    case PROTOCOL_FIELD:
        return ((time_stamp_proto *)temp)->protocol;

    default:
        IPAERR("Invalid Field type passed\n");
        return 0;
    }
}

uint16_t Read16BitFieldValue(uint32_t param,
    ipa_nat_rule_field_type fld_type)
{
    void *temp = (void *)&param;

    switch (fld_type) {

    case NEXT_INDEX_FIELD:
        return ((next_index_pub_port *)temp)->next_index;

    case PUBLIC_PORT_FILED:
        return ((next_index_pub_port *)temp)->public_port;

    case ENABLE_FIELD:
        return ((ipcksum_enbl *)temp)->enable;

    case SW_SPEC_PARAM_PREV_INDEX_FIELD:
        return ((sw_spec_params *)temp)->prev_index;

    case SW_SPEC_PARAM_INDX_TBL_ENTRY_FIELD:
        return ((sw_spec_params *)temp)->index_table_entry;

    case INDX_TBL_TBL_ENTRY_FIELD:
        return ((tbl_ent_nxt_indx *)temp)->tbl_entry;

    case INDX_TBL_NEXT_INDEX_FILED:
        return ((tbl_ent_nxt_indx *)temp)->next_index;

#ifdef NAT_DUMP
    case IP_CHKSUM_FIELD:
        return ((ipcksum_enbl *)temp)->ip_chksum;
#endif

    default:
        IPAERR("Invalid Field type passed\n");
        return 0;
    }
}

uint32_t Read32BitFieldValue(uint32_t param,
    ipa_nat_rule_field_type fld_type)
{

    void *temp = (void *)&param;

    switch (fld_type) {

    case TIME_STAMP_FIELD:
        return ((time_stamp_proto *)temp)->time_stamp;

    default:
        IPAERR("Invalid Field type passed\n");
        return 0;
    }
}

/**
 * GetIPAVer() - store the IPA HW version in the cache
 *
 * Returns: 0 on success, negative on failure
 */
int GetIPAVer(void)
{
    int ret;

    ret = ioctl(ipv4_nat_cache.ipa_fd, IPA_IOC_GET_HW_VERSION, &ipv4_nat_cache.ver);
    if (ret != 0) {
        perror("GetIPAVer(): ioctl error value");
        IPAERR("unable to get IPA version. Error: %d\n", ret);
        IPADBG("ipa fd %d\n", ipv4_nat_cache.ipa_fd);
        return -EINVAL;
    }
    IPADBG("IPA version is %d\n", ipv4_nat_cache.ver);
    return 0;
}

/**
 * CreateNatDevice() - Create the nat device
 * @mem: [in] name of the device to create
 *
 * Create the nat device, register for file-creation
 * notification in the given directory, and wait until
 * the notification is received.
 *
 * Returns: 0 on success, negative on failure
 */
int CreateNatDevice(struct ipa_ioc_nat_alloc_mem *mem)
{
    int ret;

    ret = ioctl(ipv4_nat_cache.ipa_fd, IPA_IOC_ALLOC_NAT_MEM, mem);
    if (ret != 0) {
        perror("CreateNatDevice(): ioctl error value");
        IPAERR("unable to post nat mem init. Error: %d\n", ret);
        IPADBG("ipa fd %d\n", ipv4_nat_cache.ipa_fd);
        return -EINVAL;
    }
    IPADBG("posted IPA_IOC_ALLOC_NAT_MEM to kernel successfully\n");
    return 0;
}

/**
 * GetNearest2Power() - Returns the nearest power of 2
 * @num: [in] given number
 * @ret: [out] smallest power of 2 that is >= @num
 *
 * Returns the smallest power of 2 that is greater than
 * or equal to the given number (minimum 2).
 *
 * Returns: 0 on success, negative on failure
 */
int GetNearest2Power(uint16_t num, uint16_t *ret)
{
    uint16_t number = num;
    uint16_t tmp = 1;
    *ret = 0;

    if (0 == num) {
        return -EINVAL;
    }

    if (1 == num) {
        *ret = 2;
        return 0;
    }

    for (;;) {
        if (1 == num) {
            if (number != tmp) {
                tmp *= 2;
            }

            *ret = tmp;
            return 0;
        }

        num >>= 1;
        tmp *= 2;
    }

    return -EINVAL;
}
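
/*
 * Illustrative examples (not part of the original driver):
 *
 *   GetNearest2Power(1, &r)  -> r = 2
 *   GetNearest2Power(5, &r)  -> r = 8   (next power of 2 above 5)
 *   GetNearest2Power(8, &r)  -> r = 8   (already a power of 2)
 *   GetNearest2Power(0, &r)  -> returns -EINVAL
 */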

/**
 * GetNearestEven() - Returns the nearest even number
 * @num: [in] given number
 * @ret: [out] smallest even number that is >= @num (minimum 2)
 *
 * Returns the smallest even number that is greater than
 * or equal to the given number.
 *
 * Returns: None
 */
void GetNearestEven(uint16_t num, uint16_t *ret)
{

    if (num < 2) {
        *ret = 2;
        return;
    }

    while ((num % 2) != 0) {
        num = num + 1;
    }

    *ret = num;
    return;
}
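
/*
 * Illustrative examples (not part of the original driver):
 *
 *   GetNearestEven(0, &r) -> r = 2
 *   GetNearestEven(7, &r) -> r = 8
 *   GetNearestEven(8, &r) -> r = 8
 */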

/**
 * dst_hash() - Find the index into the ipv4 base table
 * @public_ip: [in] public IP address
 * @trgt_ip: [in] Target IP address
 * @trgt_port: [in] Target port
 * @public_port: [in] Public port
 * @proto: [in] Protocol (TCP/UDP)
 * @size: [in] size of the ipv4 base table
 *
 * This hash method is used to find the hash index of a new nat
 * entry in the ipv4 base table. If the hash is zero, the new
 * entry is stored at index N-1, where N is the size of the
 * ipv4 base table.
 *
 * Returns: >0 index into the ipv4 base table, negative on failure
 */
static uint16_t dst_hash(uint32_t public_ip, uint32_t trgt_ip,
    uint16_t trgt_port, uint16_t public_port,
    uint8_t proto, uint16_t size)
{
    uint16_t hash = ((uint16_t)(trgt_ip)) ^ ((uint16_t)(trgt_ip >> 16)) ^
        (trgt_port) ^ (public_port) ^ (proto);

    if (ipv4_nat_cache.ver >= IPA_HW_v4_0)
        hash ^= ((uint16_t)(public_ip)) ^
            ((uint16_t)(public_ip >> 16));

    IPADBG("public ip 0x%X\n", public_ip);
    IPADBG("trgt_ip: 0x%x trgt_port: 0x%x\n", trgt_ip, trgt_port);
    IPADBG("public_port: 0x%x\n", public_port);
    IPADBG("proto: 0x%x size: 0x%x\n", proto, size);

    hash = (hash & size);

    /* If the hash comes out as zero, return the maximum value,
       since zero is an unused entry in the nat tables */
    if (0 == hash) {
        return size;
    }

    IPADBG("dst_hash returning value: %d\n", hash);
    return hash;
}
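
/*
 * Illustrative note (not part of the original driver): @size is passed
 * as table_entries - 1, and table_entries is always a power of 2 (see
 * ipa_nati_alloc_table() / GetNearest2Power()), so "hash & size" is a
 * cheap modulo. For example, with 16 base entries size == 0x000F and
 * any 16-bit hash is masked into 0..15; a zero result is remapped to
 * 15, the maximum index, so the effective range is 1..15.
 */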

/**
 * src_hash() - Find the index into the ipv4 index base table
 * @priv_ip: [in] Private IP address
 * @priv_port: [in] Private port
 * @trgt_ip: [in] Target IP address
 * @trgt_port: [in] Target Port
 * @proto: [in] Protocol (TCP/UDP)
 * @size: [in] size of the ipv4 index base table
 *
 * This hash method is used to find the hash index of a new nat
 * entry in the ipv4 index base table. If the hash is zero, the
 * new entry is stored at index N-1, where N is the size of the
 * ipv4 index base table.
 *
 * Returns: >0 index into the ipv4 index base table, negative on failure
 */
static uint16_t src_hash(uint32_t priv_ip, uint16_t priv_port,
    uint32_t trgt_ip, uint16_t trgt_port,
    uint8_t proto, uint16_t size)
{
    uint16_t hash = ((uint16_t)(priv_ip)) ^ ((uint16_t)(priv_ip >> 16)) ^
        (priv_port) ^
        ((uint16_t)(trgt_ip)) ^ ((uint16_t)(trgt_ip >> 16)) ^
        (trgt_port) ^ (proto);

    IPADBG("priv_ip: 0x%x priv_port: 0x%x\n", priv_ip, priv_port);
    IPADBG("trgt_ip: 0x%x trgt_port: 0x%x\n", trgt_ip, trgt_port);
    IPADBG("proto: 0x%x size: 0x%x\n", proto, size);

    hash = (hash & size);

    /* If the hash comes out as zero, return the maximum value,
       since zero is an unused entry in the nat tables */
    if (0 == hash) {
        return size;
    }

    IPADBG("src_hash returning value: %d\n", hash);
    return hash;
}

/**
 * ipa_nati_calc_ip_cksum() - Calculate the source nat
 *                            IP checksum diff
 * @pub_ip_addr: [in] public IP address
 * @priv_ip_addr: [in] private IP address
 *
 * The source nat IP checksum diff is calculated as
 * public_ip_addr - private_ip_addr.
 * Here we use 1's complement to represent a negative number:
 * take the 1's complement of the private IP address and add it
 * to the public IP address.
 *
 * Returns: >0 IP checksum diff
 */
static uint16_t ipa_nati_calc_ip_cksum(uint32_t pub_ip_addr,
    uint32_t priv_ip_addr)
{
    uint16_t ret;
    uint32_t cksum = 0;

    /* Add LSB(2 bytes) of public ip address to cksum */
    cksum += (pub_ip_addr & 0xFFFF);

    /* Add MSB(2 bytes) of public ip address to cksum
       and check for carry forward(CF), if any add it
    */
    cksum += (pub_ip_addr >> 16);
    if (cksum >> 16) {
        cksum = (cksum & 0x0000FFFF);
        cksum += 1;
    }

    /* Calculate the 1's complement of private ip address */
    priv_ip_addr = (~priv_ip_addr);

    /* Add LSB(2 bytes) of private ip address to cksum
       and check for carry forward(CF), if any add it
    */
    cksum += (priv_ip_addr & 0xFFFF);
    if (cksum >> 16) {
        cksum = (cksum & 0x0000FFFF);
        cksum += 1;
    }

    /* Add MSB(2 bytes) of private ip address to cksum
       and check for carry forward(CF), if any add it
    */
    cksum += (priv_ip_addr >> 16);
    if (cksum >> 16) {
        cksum = (cksum & 0x0000FFFF);
        cksum += 1;
    }

    /* Return the LSB(2 bytes) of checksum */
    ret = (uint16_t)cksum;
    return ret;
}
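
/*
 * Illustrative worked example (not part of the original driver),
 * with pub = 0xC0A80001 and priv = 0x0A000001:
 *
 *   cksum  = 0x0001                          (pub LSB)
 *   cksum += 0xC0A8           -> 0xC0A9      (pub MSB, no carry)
 *   ~priv  = 0xF5FFFFFE
 *   cksum += 0xFFFE -> 0x1C0A7 -> fold carry -> 0xC0A8
 *   cksum += 0xF5FF -> 0x1B6A7 -> fold carry -> 0xB6A8
 *
 *   diff = 0xB6A8, i.e. pub - priv in one's-complement arithmetic.
 */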

/**
 * ipa_nati_calc_tcp_udp_cksum() - Calculate the source nat
 *                                 TCP/UDP checksum diff
 * @pub_ip_addr: [in] public IP address
 * @pub_port: [in] public tcp/udp port
 * @priv_ip_addr: [in] private IP address
 * @priv_port: [in] private tcp/udp port
 *
 * The source nat tcp/udp checksum diff is calculated as
 * (pub_ip_addr + pub_port) - (priv_ip_addr + priv_port).
 * Here we use 1's complement to represent a negative number:
 * take the 1's complement of the private IP address and private
 * port and add them to the public IP address and public port.
 *
 * Returns: >0 tcp/udp checksum diff
 */
static uint16_t ipa_nati_calc_tcp_udp_cksum(uint32_t pub_ip_addr,
    uint16_t pub_port,
    uint32_t priv_ip_addr,
    uint16_t priv_port)
{
    uint16_t ret = 0;
    uint32_t cksum = 0;

    /* Add LSB(2 bytes) of public ip address to cksum */
    cksum += (pub_ip_addr & 0xFFFF);

    /* Add MSB(2 bytes) of public ip address to cksum
       and check for carry forward(CF), if any add it
    */
    cksum += (pub_ip_addr >> 16);
    if (cksum >> 16) {
        cksum = (cksum & 0x0000FFFF);
        cksum += 1;
    }

    /* Add public port to cksum and
       check for carry forward(CF), if any add it */
    cksum += pub_port;
    if (cksum >> 16) {
        cksum = (cksum & 0x0000FFFF);
        cksum += 1;
    }

    /* Calculate the 1's complement of private ip address */
    priv_ip_addr = (~priv_ip_addr);

    /* Add LSB(2 bytes) of private ip address to cksum
       and check for carry forward(CF), if any add it
    */
    cksum += (priv_ip_addr & 0xFFFF);
    if (cksum >> 16) {
        cksum = (cksum & 0x0000FFFF);
        cksum += 1;
    }

    /* Add MSB(2 bytes) of private ip address to cksum
       and check for carry forward(CF), if any add it
    */
    cksum += (priv_ip_addr >> 16);
    if (cksum >> 16) {
        cksum = (cksum & 0x0000FFFF);
        cksum += 1;
    }

    /* Calculate the 1's complement of private port */
    priv_port = (~priv_port);

    /* Add private port to cksum and
       check for carry forward(CF), if any add it */
    cksum += priv_port;
    if (cksum >> 16) {
        cksum = (cksum & 0x0000FFFF);
        cksum += 1;
    }

    /* return the LSB(2 bytes) of checksum */
    ret = (uint16_t)cksum;
    return ret;
}
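
/*
 * Illustrative note (not part of the original driver): this is the same
 * one's-complement fold as ipa_nati_calc_ip_cksum() with the ports added
 * in, i.e. (pub_ip + pub_port) - (priv_ip + priv_port). Reusing the
 * example above with pub_port = 0x1388 and priv_port = 0x0050:
 *
 *   cksum  = 0xB6A8 + 0x1388                   = 0xCA30  (add public port)
 *   cksum += ~0x0050 (0xFFAF) -> 0x1C9DF -> fold carry -> 0xC9E0
 *
 *   diff = 0xC9E0
 */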

/**
 * ipa_nati_make_rule_hdl() - makes a nat rule handle
 * @tbl_hdl: [in] nat table handle
 * @tbl_entry: [in] nat table entry
 *
 * Calculate the nat rule handle from the nat entry;
 * the handle is returned to the client of the nat driver.
 *
 * Returns: >0 nat rule handle
 */
uint16_t ipa_nati_make_rule_hdl(uint16_t tbl_hdl,
    uint16_t tbl_entry)
{
    struct ipa_nat_ip4_table_cache *tbl_ptr;
    uint16_t rule_hdl = 0;
    uint16_t cnt = 0;

    tbl_ptr = &ipv4_nat_cache.ip4_tbl[tbl_hdl-1];

    if (tbl_entry >= tbl_ptr->table_entries) {
        /* Increase the current expansion table count */
        tbl_ptr->cur_expn_tbl_cnt++;

        /* Update the index into table */
        rule_hdl = tbl_entry - tbl_ptr->table_entries;
        rule_hdl = (rule_hdl << IPA_NAT_RULE_HDL_TBL_TYPE_BITS);
        /* Update the table type mask */
        rule_hdl = (rule_hdl | IPA_NAT_RULE_HDL_TBL_TYPE_MASK);
    } else {
        /* Increase the current count */
        tbl_ptr->cur_tbl_cnt++;

        rule_hdl = tbl_entry;
        rule_hdl = (rule_hdl << IPA_NAT_RULE_HDL_TBL_TYPE_BITS);
    }

    for (; cnt < (tbl_ptr->table_entries + tbl_ptr->expn_table_entries); cnt++) {
        if (IPA_NAT_INVALID_NAT_ENTRY == tbl_ptr->rule_id_array[cnt]) {
            tbl_ptr->rule_id_array[cnt] = rule_hdl;
            return cnt + 1;
        }
    }

    return 0;
}
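
/*
 * Illustrative note (not part of the original driver): the value stored
 * in rule_id_array encodes the table entry shifted left by
 * IPA_NAT_RULE_HDL_TBL_TYPE_BITS, with the low type bit(s) flagging an
 * expansion-table entry; the handle returned to the client is 1 + the
 * rule_id_array slot index. Assuming TBL_TYPE_BITS == 1 and a mask of 0x1:
 *
 *   base entry 5                              -> stored id = (5 << 1)     = 0x000A
 *   expansion entry 3 (tbl_entry = entries+3) -> stored id = (3 << 1) | 1 = 0x0007
 *
 * ipa_nati_parse_ipv4_rule_hdl() below reverses this encoding.
 */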

/**
 * ipa_nati_parse_ipv4_rule_hdl() - parse a rule handle
 * @tbl_index: [in] nat table index
 * @rule_hdl: [in] nat rule handle
 * @expn_tbl: [out] expansion table or not
 * @tbl_entry: [out] index into table
 *
 * Parse the rule handle to retrieve the nat table
 * type and the nat table entry.
 *
 * Returns: None
 */
void ipa_nati_parse_ipv4_rule_hdl(uint8_t tbl_index,
    uint16_t rule_hdl, uint8_t *expn_tbl,
    uint16_t *tbl_entry)
{
    struct ipa_nat_ip4_table_cache *tbl_ptr;
    uint16_t rule_id;

    *expn_tbl = 0;
    *tbl_entry = IPA_NAT_INVALID_NAT_ENTRY;
    tbl_ptr = &ipv4_nat_cache.ip4_tbl[tbl_index];

    if (rule_hdl >= (tbl_ptr->table_entries + tbl_ptr->expn_table_entries)) {
        IPAERR("invalid rule handle\n");
        return;
    }

    rule_id = tbl_ptr->rule_id_array[rule_hdl-1];

    /* Retrieve the table type */
    *expn_tbl = 0;
    if (rule_id & IPA_NAT_RULE_HDL_TBL_TYPE_MASK) {
        *expn_tbl = 1;
    }

    /* Retrieve the table entry */
    *tbl_entry = (rule_id >> IPA_NAT_RULE_HDL_TBL_TYPE_BITS);
    return;
}

uint32_t ipa_nati_get_entry_offset(struct ipa_nat_ip4_table_cache *cache_ptr,
    nat_table_type tbl_type,
    uint16_t tbl_entry)
{
    struct ipa_nat_rule *tbl_ptr;
    uint32_t ret = 0;

    if (IPA_NAT_EXPN_TBL == tbl_type) {
        tbl_ptr = (struct ipa_nat_rule *)cache_ptr->ipv4_expn_rules_addr;
    } else {
        tbl_ptr = (struct ipa_nat_rule *)cache_ptr->ipv4_rules_addr;
    }

    ret = (char *)&tbl_ptr[tbl_entry] - (char *)tbl_ptr;
    ret += cache_ptr->tbl_addr_offset;
    return ret;
}

uint32_t ipa_nati_get_index_entry_offset(struct ipa_nat_ip4_table_cache *cache_ptr,
    nat_table_type tbl_type,
    uint16_t indx_tbl_entry)
{
    struct ipa_nat_indx_tbl_rule *indx_tbl_ptr;
    uint32_t ret = 0;

    if (IPA_NAT_INDEX_EXPN_TBL == tbl_type) {
        indx_tbl_ptr =
            (struct ipa_nat_indx_tbl_rule *)cache_ptr->index_table_expn_addr;
    } else {
        indx_tbl_ptr =
            (struct ipa_nat_indx_tbl_rule *)cache_ptr->index_table_addr;
    }

    ret = (char *)&indx_tbl_ptr[indx_tbl_entry] - (char *)indx_tbl_ptr;
    ret += cache_ptr->tbl_addr_offset;
    return ret;
}

/* ------------------------------------------
    UTILITY FUNCTIONS END
--------------------------------------------*/

/* ------------------------------------------
    Main Functions
--------------------------------------------*/
void ipa_nati_reset_tbl(uint8_t tbl_indx)
{
    uint16_t table_entries = ipv4_nat_cache.ip4_tbl[tbl_indx].table_entries;
    uint16_t expn_table_entries = ipv4_nat_cache.ip4_tbl[tbl_indx].expn_table_entries;

    /* Base table */
    IPADBG("memset() base table to 0, %p\n",
        ipv4_nat_cache.ip4_tbl[tbl_indx].ipv4_rules_addr);

    memset(ipv4_nat_cache.ip4_tbl[tbl_indx].ipv4_rules_addr,
        0,
        IPA_NAT_TABLE_ENTRY_SIZE * table_entries);

    /* Base expansion table */
    IPADBG("memset() expn base table to 0, %p\n",
        ipv4_nat_cache.ip4_tbl[tbl_indx].ipv4_expn_rules_addr);

    memset(ipv4_nat_cache.ip4_tbl[tbl_indx].ipv4_expn_rules_addr,
        0,
        IPA_NAT_TABLE_ENTRY_SIZE * expn_table_entries);

    /* Index table */
    IPADBG("memset() index table to 0, %p\n",
        ipv4_nat_cache.ip4_tbl[tbl_indx].index_table_addr);

    memset(ipv4_nat_cache.ip4_tbl[tbl_indx].index_table_addr,
        0,
        IPA_NAT_INDEX_TABLE_ENTRY_SIZE * table_entries);

    /* Index expansion table */
    IPADBG("memset() index expn table to 0, %p\n",
        ipv4_nat_cache.ip4_tbl[tbl_indx].index_table_expn_addr);

    memset(ipv4_nat_cache.ip4_tbl[tbl_indx].index_table_expn_addr,
        0,
        IPA_NAT_INDEX_TABLE_ENTRY_SIZE * expn_table_entries);

    IPADBG("returning from ipa_nati_reset_tbl()\n");
    return;
}

int ipa_nati_add_ipv4_tbl(uint32_t public_ip_addr,
    uint16_t number_of_entries,
    uint32_t *tbl_hdl)
{
    struct ipa_ioc_nat_alloc_mem mem;
    uint8_t tbl_indx = ipv4_nat_cache.table_cnt;
    uint16_t table_entries, expn_table_entries;
    int ret;

    *tbl_hdl = 0;
    /* Allocate table */
    memset(&mem, 0, sizeof(mem));
    ret = ipa_nati_alloc_table(number_of_entries,
        &mem,
        &table_entries,
        &expn_table_entries);
    if (0 != ret) {
        IPAERR("unable to allocate nat table\n");
        return -ENOMEM;
    }

    /* Update the cache;
       the (IPA_NAT_UNUSED_BASE_ENTRIES/2) indicates the unused zero-index
       entries of both the base and expansion tables
    */
    ret = ipa_nati_update_cache(&mem,
        public_ip_addr,
        table_entries,
        expn_table_entries);
    if (0 != ret) {
        IPAERR("unable to update cache Error: %d\n", ret);
        return -EINVAL;
    }

    /* Reset the nat table before posting init cmd */
    ipa_nati_reset_tbl(tbl_indx);

    /* Initialize the ipa hw with nat table dimensions */
    ret = ipa_nati_post_ipv4_init_cmd(tbl_indx);
    if (0 != ret) {
        IPAERR("unable to post nat_init command Error %d\n", ret);
        return -EINVAL;
    }

    /* Store the initial public ip address in the cached pdn table.
       This is backward compatible with pre-IPAv4 versions; we will
       always use this ip as the single PDN address
    */
    pdns[0].public_ip = public_ip_addr;

    /* Return table handle */
    ipv4_nat_cache.table_cnt++;
    *tbl_hdl = ipv4_nat_cache.table_cnt;

#ifdef NAT_DUMP
    ipa_nat_dump_ipv4_table(*tbl_hdl);
#endif
    return 0;
}

int ipa_nati_alloc_table(uint16_t number_of_entries,
    struct ipa_ioc_nat_alloc_mem *mem,
    uint16_t *table_entries,
    uint16_t *expn_table_entries)
{
    int fd = 0, ret;
    uint16_t total_entries;

    /* Copy the table name */
    strlcpy(mem->dev_name, NAT_DEV_NAME, IPA_RESOURCE_NAME_MAX);

    /* Calculate the size for base table and expansion table */
    *table_entries = (uint16_t)(number_of_entries * IPA_NAT_BASE_TABLE_PERCENTAGE);
    if (*table_entries == 0) {
        *table_entries = 1;
    }
    if (GetNearest2Power(*table_entries, table_entries)) {
        IPAERR("unable to calculate power of 2\n");
        return -EINVAL;
    }

    *expn_table_entries = (uint16_t)(number_of_entries * IPA_NAT_EXPANSION_TABLE_PERCENTAGE);
    GetNearestEven(*expn_table_entries, expn_table_entries);

    total_entries = (*table_entries) + (*expn_table_entries);

    /* Calculate the memory size for both table and index table entries */
    mem->size = (IPA_NAT_TABLE_ENTRY_SIZE * total_entries);
    IPADBG("Nat Table size: %zu\n", mem->size);
    mem->size += (IPA_NAT_INDEX_TABLE_ENTRY_SIZE * total_entries);
    IPADBG("Nat Base and Index Table size: %zu\n", mem->size);

    if (!ipv4_nat_cache.ipa_fd) {
        fd = open(IPA_DEV_NAME, O_RDONLY);
        if (fd < 0) {
            perror("ipa_nati_alloc_table(): open error value:");
            IPAERR("unable to open ipa device\n");
            return -EIO;
        }
        ipv4_nat_cache.ipa_fd = fd;
    }

    if (GetIPAVer()) {
        IPAERR("unable to get ipa ver\n");
        return -EIO;
    }

    ret = CreateNatDevice(mem);
    return ret;
}

int ipa_nati_update_cache(struct ipa_ioc_nat_alloc_mem *mem,
    uint32_t public_addr,
    uint16_t tbl_entries,
    uint16_t expn_tbl_entries)
{
    uint32_t index = ipv4_nat_cache.table_cnt;
    char *ipv4_rules_addr = NULL;

    int fd = 0;
    int flags = MAP_SHARED;
    int prot = PROT_READ | PROT_WRITE;
    off_t offset = 0;
#ifdef IPA_ON_R3PC
    int ret = 0;
    uint32_t nat_mem_offset = 0;
#endif

    ipv4_nat_cache.ip4_tbl[index].valid = IPA_NAT_TABLE_VALID;
    ipv4_nat_cache.ip4_tbl[index].public_addr = public_addr;
    ipv4_nat_cache.ip4_tbl[index].size = mem->size;
    ipv4_nat_cache.ip4_tbl[index].tbl_addr_offset = mem->offset;

    ipv4_nat_cache.ip4_tbl[index].table_entries = tbl_entries;
    ipv4_nat_cache.ip4_tbl[index].expn_table_entries = expn_tbl_entries;

    IPADBG("num of ipv4 rules:%d\n", tbl_entries);
    IPADBG("num of ipv4 expn rules:%d\n", expn_tbl_entries);

    /* allocate memory for the nat index expansion table meta */
    if (NULL == ipv4_nat_cache.ip4_tbl[index].index_expn_table_meta) {
        ipv4_nat_cache.ip4_tbl[index].index_expn_table_meta =
            malloc(sizeof(struct ipa_nat_indx_tbl_meta_info) * expn_tbl_entries);

        if (NULL == ipv4_nat_cache.ip4_tbl[index].index_expn_table_meta) {
            IPAERR("Fail to allocate ipv4 index expansion table meta\n");
            return 0;
        }

        memset(ipv4_nat_cache.ip4_tbl[index].index_expn_table_meta,
            0,
            sizeof(struct ipa_nat_indx_tbl_meta_info) * expn_tbl_entries);
    }

    /* Allocate memory for rule_id_array */
    if (NULL == ipv4_nat_cache.ip4_tbl[index].rule_id_array) {
        ipv4_nat_cache.ip4_tbl[index].rule_id_array =
            malloc(sizeof(uint16_t) * (tbl_entries + expn_tbl_entries));

        if (NULL == ipv4_nat_cache.ip4_tbl[index].rule_id_array) {
            IPAERR("Fail to allocate rule id array\n");
            return 0;
        }

        memset(ipv4_nat_cache.ip4_tbl[index].rule_id_array,
            0,
            sizeof(uint16_t) * (tbl_entries + expn_tbl_entries));
    }

    /* open the nat table */
    strlcpy(mem->dev_name, NAT_DEV_FULL_NAME, IPA_RESOURCE_NAME_MAX);
    fd = open(mem->dev_name, O_RDWR);
    if (fd < 0) {
        perror("ipa_nati_update_cache(): open error value:");
        IPAERR("unable to open nat device. Error:%d\n", fd);
        return -EIO;
    }

    /* copy the nat table name */
    strlcpy(ipv4_nat_cache.ip4_tbl[index].table_name,
        mem->dev_name,
        IPA_RESOURCE_NAME_MAX);
    ipv4_nat_cache.ip4_tbl[index].nat_fd = fd;

    /* mmap the nat device table */
#ifndef IPA_ON_R3PC
    ipv4_rules_addr = (void *)mmap(NULL, mem->size,
        prot, flags,
        fd, offset);
#else
    IPADBG("user space r3pc\n");
    ipv4_rules_addr = (void *)mmap((caddr_t)0, NAT_MMAP_MEM_SIZE,
        prot, flags,
        fd, offset);
#endif
    if (MAP_FAILED == ipv4_rules_addr) {
        perror("unable to mmap the memory\n");
        return -EINVAL;
    }

#ifdef IPA_ON_R3PC
    ret = ioctl(ipv4_nat_cache.ipa_fd, IPA_IOC_GET_NAT_OFFSET, &nat_mem_offset);
    if (ret != 0) {
        perror("ipa_nati_update_cache(): ioctl error value");
        IPAERR("unable to post nat offset cmd Error: %d\n", ret);
        IPADBG("ipa fd %d\n", ipv4_nat_cache.ipa_fd);
        return -EIO;
    }
    ipv4_rules_addr += nat_mem_offset;
    ipv4_nat_cache.ip4_tbl[index].mmap_offset = nat_mem_offset;
#endif

    IPADBG("mmap return value 0x%lx\n", (long unsigned int)ipv4_rules_addr);

    ipv4_nat_cache.ip4_tbl[index].ipv4_rules_addr = ipv4_rules_addr;

    ipv4_nat_cache.ip4_tbl[index].ipv4_expn_rules_addr =
        ipv4_rules_addr + (IPA_NAT_TABLE_ENTRY_SIZE * tbl_entries);

    ipv4_nat_cache.ip4_tbl[index].index_table_addr =
        ipv4_rules_addr + (IPA_NAT_TABLE_ENTRY_SIZE * (tbl_entries + expn_tbl_entries));

    ipv4_nat_cache.ip4_tbl[index].index_table_expn_addr =
        ipv4_rules_addr +
        (IPA_NAT_TABLE_ENTRY_SIZE * (tbl_entries + expn_tbl_entries)) +
        (IPA_NAT_INDEX_TABLE_ENTRY_SIZE * tbl_entries);

    return 0;
}
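
/*
 * Illustrative note (not part of the original driver): after the mmap,
 * the single shared region is carved up in this order (matching the
 * offsets programmed by ipa_nati_post_ipv4_init_cmd() below):
 *
 *   +-------------------------+ ipv4_rules_addr
 *   | base table              | tbl_entries      * IPA_NAT_TABLE_ENTRY_SIZE
 *   +-------------------------+ ipv4_expn_rules_addr
 *   | expansion table         | expn_tbl_entries * IPA_NAT_TABLE_ENTRY_SIZE
 *   +-------------------------+ index_table_addr
 *   | index table             | tbl_entries      * IPA_NAT_INDEX_TABLE_ENTRY_SIZE
 *   +-------------------------+ index_table_expn_addr
 *   | index expansion table   | expn_tbl_entries * IPA_NAT_INDEX_TABLE_ENTRY_SIZE
 *   +-------------------------+
 */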

/* comment: check the implementation once;
   offset should be in terms of bytes */
int ipa_nati_post_ipv4_init_cmd(uint8_t tbl_index)
{
    struct ipa_ioc_v4_nat_init cmd;
    uint32_t offset = ipv4_nat_cache.ip4_tbl[tbl_index].tbl_addr_offset;
    int ret;

    cmd.tbl_index = tbl_index;

    cmd.ipv4_rules_offset = offset;
    cmd.expn_rules_offset = cmd.ipv4_rules_offset +
        (ipv4_nat_cache.ip4_tbl[tbl_index].table_entries * IPA_NAT_TABLE_ENTRY_SIZE);

    cmd.index_offset = cmd.expn_rules_offset +
        (ipv4_nat_cache.ip4_tbl[tbl_index].expn_table_entries * IPA_NAT_TABLE_ENTRY_SIZE);

    cmd.index_expn_offset = cmd.index_offset +
        (ipv4_nat_cache.ip4_tbl[tbl_index].table_entries * IPA_NAT_INDEX_TABLE_ENTRY_SIZE);

    cmd.table_entries = ipv4_nat_cache.ip4_tbl[tbl_index].table_entries - 1;
    cmd.expn_table_entries = ipv4_nat_cache.ip4_tbl[tbl_index].expn_table_entries;

    cmd.ip_addr = ipv4_nat_cache.ip4_tbl[tbl_index].public_addr;

    ret = ioctl(ipv4_nat_cache.ipa_fd, IPA_IOC_V4_INIT_NAT, &cmd);
    if (ret != 0) {
        perror("ipa_nati_post_ipv4_init_cmd(): ioctl error value");
        IPAERR("unable to post init cmd Error: %d\n", ret);
        IPADBG("ipa fd %d\n", ipv4_nat_cache.ipa_fd);
        return -EINVAL;
    }
    IPADBG("Posted IPA_IOC_V4_INIT_NAT to kernel successfully\n");

    return 0;
}

int ipa_nati_del_ipv4_table(uint32_t tbl_hdl)
{
    uint8_t index = (uint8_t)(tbl_hdl - 1);
    void *addr = (void *)ipv4_nat_cache.ip4_tbl[index].ipv4_rules_addr;
    struct ipa_ioc_v4_nat_del del_cmd;
    int ret;

    if (!ipv4_nat_cache.ip4_tbl[index].valid) {
        IPAERR("invalid table handle passed\n");
        ret = -EINVAL;
        goto fail;
    }

    if (pthread_mutex_lock(&nat_mutex) != 0) {
        ret = -1;
        goto lock_mutex_fail;
    }

    /* unmap the device memory from user space */
#ifndef IPA_ON_R3PC
    munmap(addr, ipv4_nat_cache.ip4_tbl[index].size);
#else
    addr = (char *)addr - ipv4_nat_cache.ip4_tbl[index].mmap_offset;
    munmap(addr, NAT_MMAP_MEM_SIZE);
#endif

    /* close the file descriptor of nat device */
    if (close(ipv4_nat_cache.ip4_tbl[index].nat_fd)) {
        IPAERR("unable to close the file descriptor\n");
        ret = -EINVAL;
        if (pthread_mutex_unlock(&nat_mutex) != 0)
            goto unlock_mutex_fail;
        goto fail;
    }

    del_cmd.table_index = index;
    del_cmd.public_ip_addr = ipv4_nat_cache.ip4_tbl[index].public_addr;
    ret = ioctl(ipv4_nat_cache.ipa_fd, IPA_IOC_V4_DEL_NAT, &del_cmd);
    if (ret != 0) {
        perror("ipa_nati_del_ipv4_table(): ioctl error value");
        IPAERR("unable to post nat del command init Error: %d\n", ret);
        IPADBG("ipa fd %d\n", ipv4_nat_cache.ipa_fd);
        ret = -EINVAL;
        if (pthread_mutex_unlock(&nat_mutex) != 0)
            goto unlock_mutex_fail;
        goto fail;
    }
    IPADBG("posted IPA_IOC_V4_DEL_NAT to kernel successfully\n");

    free(ipv4_nat_cache.ip4_tbl[index].index_expn_table_meta);
    free(ipv4_nat_cache.ip4_tbl[index].rule_id_array);

    memset(&ipv4_nat_cache.ip4_tbl[index],
        0,
        sizeof(ipv4_nat_cache.ip4_tbl[index]));

    /* Decrease the table count by 1 */
    ipv4_nat_cache.table_cnt--;

    if (pthread_mutex_unlock(&nat_mutex) != 0) {
        ret = -1;
        goto unlock_mutex_fail;
    }

    return 0;

lock_mutex_fail:
    IPAERR("unable to lock the nat mutex\n");
    return ret;

unlock_mutex_fail:
    IPAERR("unable to unlock the nat mutex\n");

fail:
    return ret;
}

int ipa_nati_query_timestamp(uint32_t tbl_hdl,
    uint32_t rule_hdl,
    uint32_t *time_stamp)
{
    uint8_t tbl_index = (uint8_t)(tbl_hdl - 1);
    uint8_t expn_tbl = 0;
    uint16_t tbl_entry = 0;
    struct ipa_nat_rule *tbl_ptr = NULL;

    if (!ipv4_nat_cache.ip4_tbl[tbl_index].valid) {
        IPAERR("invalid table handle\n");
        return -EINVAL;
    }

    if (pthread_mutex_lock(&nat_mutex) != 0) {
        IPAERR("unable to lock the nat mutex\n");
        return -1;
    }

    ipa_nati_parse_ipv4_rule_hdl(tbl_index, (uint16_t)rule_hdl,
        &expn_tbl, &tbl_entry);

    tbl_ptr =
        (struct ipa_nat_rule *)ipv4_nat_cache.ip4_tbl[tbl_index].ipv4_rules_addr;
    if (expn_tbl) {
        tbl_ptr =
            (struct ipa_nat_rule *)ipv4_nat_cache.ip4_tbl[tbl_index].ipv4_expn_rules_addr;
    }

    if (tbl_ptr)
        *time_stamp = Read32BitFieldValue(tbl_ptr[tbl_entry].ts_proto,
            TIME_STAMP_FIELD);

    if (pthread_mutex_unlock(&nat_mutex) != 0) {
        IPAERR("unable to unlock the nat mutex\n");
        return -1;
    }

    return 0;
}

int ipa_nati_modify_pdn(struct ipa_ioc_nat_pdn_entry *entry)
{
    if (entry->public_ip == 0)
        IPADBG("PDN %d public ip will be set to 0\n", entry->pdn_index);

    if (ioctl(ipv4_nat_cache.ipa_fd, IPA_IOC_NAT_MODIFY_PDN, entry)) {
        perror("ipa_nati_modify_pdn(): ioctl error value");
        IPAERR("unable to call modify pdn ioctl\n");
        IPAERR("index %d, ip 0x%X, src_metadata 0x%X, dst_metadata 0x%X\n",
            entry->pdn_index, entry->public_ip, entry->src_metadata, entry->dst_metadata);
        IPADBG("ipa fd %d\n", ipv4_nat_cache.ipa_fd);
        return -EIO;
    }

    pdns[entry->pdn_index].public_ip = entry->public_ip;
    pdns[entry->pdn_index].dst_metadata = entry->dst_metadata;
    pdns[entry->pdn_index].src_metadata = entry->src_metadata;

    IPADBG("posted IPA_IOC_NAT_MODIFY_PDN to kernel successfully and stored in cache\nindex %d, ip 0x%X, src_metadata 0x%X, dst_metadata 0x%X\n",
        entry->pdn_index, entry->public_ip, entry->src_metadata, entry->dst_metadata);

    return 0;
}

int ipa_nati_add_ipv4_rule(uint32_t tbl_hdl,
    const ipa_nat_ipv4_rule *clnt_rule,
    uint32_t *rule_hdl)
{
    struct ipa_nat_ip4_table_cache *tbl_ptr;
    struct ipa_nat_sw_rule sw_rule;
    struct ipa_nat_indx_tbl_sw_rule index_sw_rule;
    uint16_t new_entry, new_index_tbl_entry;

    /* verify that the rule's PDN is valid */
    if (clnt_rule->pdn_index >= IPA_MAX_PDN_NUM ||
        pdns[clnt_rule->pdn_index].public_ip == 0) {
        IPAERR("invalid parameters, pdn index %d, public ip = 0x%X\n",
            clnt_rule->pdn_index, pdns[clnt_rule->pdn_index].public_ip);
        return -EINVAL;
    }

    memset(&sw_rule, 0, sizeof(sw_rule));
    memset(&index_sw_rule, 0, sizeof(index_sw_rule));

    /* Generate rule from client input */
    if (ipa_nati_generate_rule(tbl_hdl, clnt_rule,
            &sw_rule, &index_sw_rule,
            &new_entry, &new_index_tbl_entry)) {
        IPAERR("unable to generate rule\n");
        return -EINVAL;
    }

    tbl_ptr = &ipv4_nat_cache.ip4_tbl[tbl_hdl-1];
    ipa_nati_copy_ipv4_rule_to_hw(tbl_ptr, &sw_rule, new_entry, (uint8_t)(tbl_hdl-1));
    ipa_nati_copy_ipv4_index_rule_to_hw(tbl_ptr,
        &index_sw_rule,
        new_index_tbl_entry,
        (uint8_t)(tbl_hdl-1));

    IPADBG("new entry:%d, new index entry: %d\n", new_entry, new_index_tbl_entry);
    if (ipa_nati_post_ipv4_dma_cmd((uint8_t)(tbl_hdl - 1), new_entry)) {
        IPAERR("unable to post dma command\n");
        return -EIO;
    }

    /* Generate rule handle */
    *rule_hdl = ipa_nati_make_rule_hdl((uint16_t)tbl_hdl, new_entry);
    if (!(*rule_hdl)) {
        IPAERR("unable to generate rule handle\n");
        return -EINVAL;
    }

#ifdef NAT_DUMP
    ipa_nat_dump_ipv4_table(tbl_hdl);
#endif

    return 0;
}

int ipa_nati_generate_rule(uint32_t tbl_hdl,
    const ipa_nat_ipv4_rule *clnt_rule,
    struct ipa_nat_sw_rule *rule,
    struct ipa_nat_indx_tbl_sw_rule *index_sw_rule,
    uint16_t *tbl_entry,
    uint16_t *indx_tbl_entry)
{
    struct ipa_nat_ip4_table_cache *tbl_ptr;
    uint16_t tmp;

    if (NULL == clnt_rule || NULL == index_sw_rule ||
        NULL == rule || NULL == tbl_entry ||
        NULL == indx_tbl_entry) {
        IPAERR("invalid parameters\n");
        return -EINVAL;
    }

    tbl_ptr = &ipv4_nat_cache.ip4_tbl[tbl_hdl-1];

    *tbl_entry = ipa_nati_generate_tbl_rule(clnt_rule,
        rule,
        tbl_ptr);
    if (IPA_NAT_INVALID_NAT_ENTRY == *tbl_entry) {
        IPAERR("unable to generate table entry\n");
        return -EINVAL;
    }

    index_sw_rule->tbl_entry = *tbl_entry;
    *indx_tbl_entry = ipa_nati_generate_index_rule(clnt_rule,
        index_sw_rule,
        tbl_ptr);
    if (IPA_NAT_INVALID_NAT_ENTRY == *indx_tbl_entry) {
        IPAERR("unable to generate index table entry\n");
        return -EINVAL;
    }

    rule->indx_tbl_entry = *indx_tbl_entry;
    if (*indx_tbl_entry >= tbl_ptr->table_entries) {
        tmp = *indx_tbl_entry - tbl_ptr->table_entries;
        tbl_ptr->index_expn_table_meta[tmp].prev_index = index_sw_rule->prev_index;
    }

    return 0;
}

uint16_t ipa_nati_generate_tbl_rule(const ipa_nat_ipv4_rule *clnt_rule,
    struct ipa_nat_sw_rule *sw_rule,
    struct ipa_nat_ip4_table_cache *tbl_ptr)
{
    uint32_t pub_ip_addr;
    uint16_t prev = 0, nxt_indx = 0, new_entry;
    struct ipa_nat_rule *tbl = NULL, *expn_tbl = NULL;

    pub_ip_addr = pdns[clnt_rule->pdn_index].public_ip;

    tbl = (struct ipa_nat_rule *)tbl_ptr->ipv4_rules_addr;
    expn_tbl = (struct ipa_nat_rule *)tbl_ptr->ipv4_expn_rules_addr;

    /* copy the values from client rule to sw rule */
    sw_rule->private_ip = clnt_rule->private_ip;
    sw_rule->private_port = clnt_rule->private_port;
    sw_rule->protocol = clnt_rule->protocol;
    sw_rule->public_port = clnt_rule->public_port;
    sw_rule->target_ip = clnt_rule->target_ip;
    sw_rule->target_port = clnt_rule->target_port;
    sw_rule->pdn_index = clnt_rule->pdn_index;

    /* consider only public and private ip fields */
    sw_rule->ip_chksum = ipa_nati_calc_ip_cksum(pub_ip_addr,
        clnt_rule->private_ip);

    if (IPPROTO_TCP == sw_rule->protocol ||
        IPPROTO_UDP == sw_rule->protocol) {
        /* consider public and private ip & port fields */
        sw_rule->tcp_udp_chksum = ipa_nati_calc_tcp_udp_cksum(
            pub_ip_addr,
            clnt_rule->public_port,
            clnt_rule->private_ip,
            clnt_rule->private_port);
    }

    sw_rule->rsvd1 = 0;
    sw_rule->enable = IPA_NAT_FLAG_DISABLE_BIT;
    sw_rule->next_index = 0;

    /*
      SW sets this timer to 0.
      The assumption is that 0 is an invalid clock value and no clock
      wraparounds are expected
    */
    sw_rule->time_stamp = 0;
    sw_rule->rsvd2 = 0;
    sw_rule->rsvd3 = 0;
    sw_rule->prev_index = 0;
    sw_rule->indx_tbl_entry = 0;

    new_entry = dst_hash(pub_ip_addr, clnt_rule->target_ip,
        clnt_rule->target_port,
        clnt_rule->public_port,
        clnt_rule->protocol,
        tbl_ptr->table_entries-1);

    /* check whether there is any collision;
       if there is no collision, return */
    if (!Read16BitFieldValue(tbl[new_entry].ip_cksm_enbl,
            ENABLE_FIELD)) {
        sw_rule->prev_index = 0;
        IPADBG("Destination Nat New Entry Index %d\n", new_entry);
        return new_entry;
    }

    /* First collision */
    if (Read16BitFieldValue(tbl[new_entry].nxt_indx_pub_port,
            NEXT_INDEX_FIELD) == IPA_NAT_INVALID_NAT_ENTRY) {
        sw_rule->prev_index = new_entry;
    } else { /* check for more than one collision */
        /* Find the IPA_NAT_DEL_TYPE_LAST entry in list */
        nxt_indx = Read16BitFieldValue(tbl[new_entry].nxt_indx_pub_port,
            NEXT_INDEX_FIELD);

        while (nxt_indx != IPA_NAT_INVALID_NAT_ENTRY) {
            prev = nxt_indx;

            nxt_indx -= tbl_ptr->table_entries;
            nxt_indx = Read16BitFieldValue(expn_tbl[nxt_indx].nxt_indx_pub_port,
                NEXT_INDEX_FIELD);

            /* Handling error case */
            if (prev == nxt_indx) {
                IPAERR("Error: Prev index:%d and next:%d index should not be same\n", prev, nxt_indx);
                return IPA_NAT_INVALID_NAT_ENTRY;
            }
        }

        sw_rule->prev_index = prev;
    }

    /* On collision check for a free entry in the expansion table */
    new_entry = ipa_nati_expn_tbl_free_entry(expn_tbl,
        tbl_ptr->expn_table_entries);

    if (IPA_NAT_INVALID_NAT_ENTRY == new_entry) {
        /* Expansion table is full, return */
        IPAERR("Expansion table is full\n");
        IPAERR("Current Table: %d & Expn Entries: %d\n",
            tbl_ptr->cur_tbl_cnt, tbl_ptr->cur_expn_tbl_cnt);
        return IPA_NAT_INVALID_NAT_ENTRY;
    }
    new_entry += tbl_ptr->table_entries;

    IPADBG("new entry index %d\n", new_entry);
    return new_entry;
}

/* returns expn table entry index */
uint16_t ipa_nati_expn_tbl_free_entry(struct ipa_nat_rule *expn_tbl,
    uint16_t size)
{
    int cnt;

    for (cnt = 1; cnt < size; cnt++) {
        if (!Read16BitFieldValue(expn_tbl[cnt].ip_cksm_enbl,
                ENABLE_FIELD)) {
            IPADBG("new expansion table entry index %d\n", cnt);
            return cnt;
        }
    }

    IPAERR("nat expansion table is full\n");
    return 0;
}

uint16_t ipa_nati_generate_index_rule(const ipa_nat_ipv4_rule *clnt_rule,
    struct ipa_nat_indx_tbl_sw_rule *sw_rule,
    struct ipa_nat_ip4_table_cache *tbl_ptr)
{
    struct ipa_nat_indx_tbl_rule *indx_tbl, *indx_expn_tbl;
    uint16_t prev = 0, nxt_indx = 0, new_entry;

    indx_tbl =
        (struct ipa_nat_indx_tbl_rule *)tbl_ptr->index_table_addr;
    indx_expn_tbl =
        (struct ipa_nat_indx_tbl_rule *)tbl_ptr->index_table_expn_addr;

    new_entry = src_hash(clnt_rule->private_ip,
        clnt_rule->private_port,
        clnt_rule->target_ip,
        clnt_rule->target_port,
        clnt_rule->protocol,
        tbl_ptr->table_entries-1);

    /* check whether there is any collision;
       if there is no collision, return */
    if (!Read16BitFieldValue(indx_tbl[new_entry].tbl_entry_nxt_indx,
            INDX_TBL_TBL_ENTRY_FIELD)) {
        sw_rule->prev_index = 0;
        IPADBG("Source Nat Index Table Entry %d\n", new_entry);
        return new_entry;
    }

    /* check for more than one collision */
    if (Read16BitFieldValue(indx_tbl[new_entry].tbl_entry_nxt_indx,
            INDX_TBL_NEXT_INDEX_FILED) == IPA_NAT_INVALID_NAT_ENTRY) {
        sw_rule->prev_index = new_entry;
        IPADBG("First collision. Entry %d\n", new_entry);
    } else {
        /* Find the IPA_NAT_DEL_TYPE_LAST entry in list */
        nxt_indx = Read16BitFieldValue(indx_tbl[new_entry].tbl_entry_nxt_indx,
            INDX_TBL_NEXT_INDEX_FILED);

        while (nxt_indx != IPA_NAT_INVALID_NAT_ENTRY) {
            prev = nxt_indx;

            nxt_indx -= tbl_ptr->table_entries;
            nxt_indx = Read16BitFieldValue(indx_expn_tbl[nxt_indx].tbl_entry_nxt_indx,
                INDX_TBL_NEXT_INDEX_FILED);

            /* Handling error case */
            if (prev == nxt_indx) {
                IPAERR("Error: Prev:%d and next:%d index should not be same\n", prev, nxt_indx);
                return IPA_NAT_INVALID_NAT_ENTRY;
            }
        }

        sw_rule->prev_index = prev;
    }

    /* On collision check for a free entry in the expansion table */
    new_entry = ipa_nati_index_expn_get_free_entry(indx_expn_tbl,
        tbl_ptr->expn_table_entries);

    if (IPA_NAT_INVALID_NAT_ENTRY == new_entry) {
        /* Expansion table is full, return */
        IPAERR("Index expansion table is full\n");
        IPAERR("Current Table: %d & Expn Entries: %d\n",
            tbl_ptr->cur_tbl_cnt, tbl_ptr->cur_expn_tbl_cnt);
        return IPA_NAT_INVALID_NAT_ENTRY;
    }
    new_entry += tbl_ptr->table_entries;


    if (sw_rule->prev_index == new_entry) {
        IPAERR("Error: prev_entry:%d ", sw_rule->prev_index);
        IPAERR("and new_entry:%d should not be same ", new_entry);
        IPAERR("infinite loop detected\n");
        return IPA_NAT_INVALID_NAT_ENTRY;
    }

    IPADBG("index table entry %d\n", new_entry);
    return new_entry;
}

/* returns index expn table entry index */
uint16_t ipa_nati_index_expn_get_free_entry(
    struct ipa_nat_indx_tbl_rule *indx_tbl,
    uint16_t size)
{
    int cnt;
    for (cnt = 1; cnt < size; cnt++) {
        if (!Read16BitFieldValue(indx_tbl[cnt].tbl_entry_nxt_indx,
                INDX_TBL_TBL_ENTRY_FIELD)) {
            return cnt;
        }
    }

    IPAERR("nat index expansion table is full\n");
    return 0;
}

void ipa_nati_write_next_index(uint8_t tbl_indx,
    nat_table_type tbl_type,
    uint16_t value,
    uint32_t offset)
{
    struct ipa_ioc_nat_dma_cmd *cmd;

    IPADBG("Updating next index field of table %d on collision using dma\n", tbl_type);
    IPADBG("table index: %d, value: %d offset:%d\n", tbl_indx, value, offset);

    cmd = (struct ipa_ioc_nat_dma_cmd *)
        malloc(sizeof(struct ipa_ioc_nat_dma_cmd) +
            sizeof(struct ipa_ioc_nat_dma_one));
    if (NULL == cmd) {
        IPAERR("unable to allocate memory\n");
        return;
    }

    cmd->dma[0].table_index = tbl_indx;
    cmd->dma[0].base_addr = tbl_type;
    cmd->dma[0].data = value;
    cmd->dma[0].offset = offset;

    cmd->entries = 1;
    if (ioctl(ipv4_nat_cache.ipa_fd, IPA_IOC_NAT_DMA, cmd)) {
        perror("ipa_nati_write_next_index(): ioctl error value");
        IPAERR("unable to call dma ioctl to update next index\n");
        IPAERR("ipa fd %d\n", ipv4_nat_cache.ipa_fd);
        goto fail;
    }

fail:
    free(cmd);

    return;
}

void ipa_nati_copy_ipv4_rule_to_hw(
    struct ipa_nat_ip4_table_cache *ipv4_cache,
    struct ipa_nat_sw_rule *rule,
    uint16_t entry, uint8_t tbl_index)
{
    struct ipa_nat_rule *tbl_ptr;
    uint16_t prev_entry = rule->prev_index;
    nat_table_type tbl_type;
    uint32_t offset = 0;

    if (entry < ipv4_cache->table_entries) {
        tbl_ptr = (struct ipa_nat_rule *)ipv4_cache->ipv4_rules_addr;

        memcpy(&tbl_ptr[entry],
            rule,
            sizeof(struct ipa_nat_rule));
    } else {
        tbl_ptr = (struct ipa_nat_rule *)ipv4_cache->ipv4_expn_rules_addr;
        memcpy(&tbl_ptr[entry - ipv4_cache->table_entries],
            rule,
            sizeof(struct ipa_nat_rule));
    }

    /* Update the previous entry's next_index */
    if (IPA_NAT_INVALID_NAT_ENTRY != prev_entry) {

        if (prev_entry < ipv4_cache->table_entries) {
            tbl_type = IPA_NAT_BASE_TBL;
            tbl_ptr = (struct ipa_nat_rule *)ipv4_cache->ipv4_rules_addr;
        } else {
            tbl_type = IPA_NAT_EXPN_TBL;
            /* tbl_ptr is already pointing to the expansion table;
               no need to initialize it */
            prev_entry = prev_entry - ipv4_cache->table_entries;
        }

        offset = ipa_nati_get_entry_offset(ipv4_cache, tbl_type, prev_entry);
        offset += IPA_NAT_RULE_NEXT_FIELD_OFFSET;

        ipa_nati_write_next_index(tbl_index, tbl_type, entry, offset);
    }

    return;
}

void ipa_nati_copy_ipv4_index_rule_to_hw(
    struct ipa_nat_ip4_table_cache *ipv4_cache,
    struct ipa_nat_indx_tbl_sw_rule *indx_sw_rule,
    uint16_t entry,
    uint8_t tbl_index)
{
    struct ipa_nat_indx_tbl_rule *tbl_ptr;
    struct ipa_nat_sw_indx_tbl_rule sw_rule;
    uint16_t prev_entry = indx_sw_rule->prev_index;
    nat_table_type tbl_type;
    uint16_t offset = 0;

    sw_rule.next_index = indx_sw_rule->next_index;
    sw_rule.tbl_entry = indx_sw_rule->tbl_entry;

    if (entry < ipv4_cache->table_entries) {
        tbl_ptr = (struct ipa_nat_indx_tbl_rule *)ipv4_cache->index_table_addr;

        memcpy(&tbl_ptr[entry],
            &sw_rule,
            sizeof(struct ipa_nat_indx_tbl_rule));
    } else {
        tbl_ptr = (struct ipa_nat_indx_tbl_rule *)ipv4_cache->index_table_expn_addr;

        memcpy(&tbl_ptr[entry - ipv4_cache->table_entries],
            &sw_rule,
            sizeof(struct ipa_nat_indx_tbl_rule));
    }

    /* Update the next field of the previous entry on collision */
    if (IPA_NAT_INVALID_NAT_ENTRY != prev_entry) {
        if (prev_entry < ipv4_cache->table_entries) {
            tbl_type = IPA_NAT_INDX_TBL;
            tbl_ptr = (struct ipa_nat_indx_tbl_rule *)ipv4_cache->index_table_addr;
        } else {
            tbl_type = IPA_NAT_INDEX_EXPN_TBL;
            /* tbl_ptr is already pointing to the expansion table;
               no need to initialize it */
            prev_entry = prev_entry - ipv4_cache->table_entries;
        }

        offset = ipa_nati_get_index_entry_offset(ipv4_cache, tbl_type, prev_entry);
        offset += IPA_NAT_INDEX_RULE_NEXT_FIELD_OFFSET;

        IPADBG("Updating next index field of index table on collision using dma\n");
        ipa_nati_write_next_index(tbl_index, tbl_type, entry, offset);
    }

    return;
}

int ipa_nati_post_ipv4_dma_cmd(uint8_t tbl_indx,
    uint16_t entry)
{
    struct ipa_ioc_nat_dma_cmd *cmd;
    struct ipa_nat_rule *tbl_ptr;
    uint32_t offset = ipv4_nat_cache.ip4_tbl[tbl_indx].tbl_addr_offset;
    int ret = 0;

    cmd = (struct ipa_ioc_nat_dma_cmd *)
        malloc(sizeof(struct ipa_ioc_nat_dma_cmd) +
            sizeof(struct ipa_ioc_nat_dma_one));
    if (NULL == cmd) {
        IPAERR("unable to allocate memory\n");
        return -ENOMEM;
    }

    if (entry < ipv4_nat_cache.ip4_tbl[tbl_indx].table_entries) {
        tbl_ptr =
            (struct ipa_nat_rule *)ipv4_nat_cache.ip4_tbl[tbl_indx].ipv4_rules_addr;

        cmd->dma[0].table_index = tbl_indx;
        cmd->dma[0].base_addr = IPA_NAT_BASE_TBL;
        cmd->dma[0].data = IPA_NAT_FLAG_ENABLE_BIT_MASK;

        cmd->dma[0].offset = (char *)&tbl_ptr[entry] - (char *)tbl_ptr;
        cmd->dma[0].offset += IPA_NAT_RULE_FLAG_FIELD_OFFSET;
    } else {
        tbl_ptr =
            (struct ipa_nat_rule *)ipv4_nat_cache.ip4_tbl[tbl_indx].ipv4_expn_rules_addr;
        entry = entry - ipv4_nat_cache.ip4_tbl[tbl_indx].table_entries;

        cmd->dma[0].table_index = tbl_indx;
        cmd->dma[0].base_addr = IPA_NAT_EXPN_TBL;
        cmd->dma[0].data = IPA_NAT_FLAG_ENABLE_BIT_MASK;

        cmd->dma[0].offset = (char *)&tbl_ptr[entry] - (char *)tbl_ptr;
        cmd->dma[0].offset += IPA_NAT_RULE_FLAG_FIELD_OFFSET;
        cmd->dma[0].offset += offset;
    }

    cmd->entries = 1;
    if (ioctl(ipv4_nat_cache.ipa_fd, IPA_IOC_NAT_DMA, cmd)) {
        perror("ipa_nati_post_ipv4_dma_cmd(): ioctl error value");
        IPAERR("unable to call dma ioctl\n");
        IPADBG("ipa fd %d\n", ipv4_nat_cache.ipa_fd);
        ret = -EIO;
        goto fail;
    }
    IPADBG("posted IPA_IOC_NAT_DMA to kernel successfully during add operation\n");

fail:
    free(cmd);

    return ret;
}

int ipa_nati_del_ipv4_rule(uint32_t tbl_hdl,
    uint32_t rule_hdl)
{
    uint8_t expn_tbl;
    uint16_t tbl_entry;
    struct ipa_nat_ip4_table_cache *tbl_ptr;
    del_type rule_pos;
    uint8_t tbl_indx = (uint8_t)(tbl_hdl - 1);
    int ret;

    /* Parse the rule handle */
    ipa_nati_parse_ipv4_rule_hdl(tbl_indx, (uint16_t)rule_hdl,
        &expn_tbl, &tbl_entry);
    if (IPA_NAT_INVALID_NAT_ENTRY == tbl_entry) {
        IPAERR("Invalid Rule Entry\n");
        ret = -EINVAL;
        goto fail;
    }

    if (pthread_mutex_lock(&nat_mutex) != 0) {
        ret = -1;
        goto mutex_lock_error;
    }

    IPADBG("Delete below rule\n");
    IPADBG("tbl_entry:%d expn_tbl:%d\n", tbl_entry, expn_tbl);

    tbl_ptr = &ipv4_nat_cache.ip4_tbl[tbl_indx];
    if (!tbl_ptr->valid) {
        IPAERR("invalid table handle\n");
        ret = -EINVAL;
        if (pthread_mutex_unlock(&nat_mutex) != 0)
            goto mutex_unlock_error;
        goto fail;
    }

    ipa_nati_find_rule_pos(tbl_ptr, expn_tbl,
        tbl_entry, &rule_pos);
    IPADBG("rule_pos:%d\n", rule_pos);

    if (ipa_nati_post_del_dma_cmd(tbl_indx, tbl_entry,
            expn_tbl, rule_pos)) {
        ret = -EINVAL;
        if (pthread_mutex_unlock(&nat_mutex) != 0)
            goto mutex_unlock_error;
        goto fail;
    }

    ipa_nati_del_dead_ipv4_head_nodes(tbl_indx);

    /* Reset rule_id_array entry */
    ipv4_nat_cache.ip4_tbl[tbl_indx].rule_id_array[rule_hdl-1] =
        IPA_NAT_INVALID_NAT_ENTRY;

#ifdef NAT_DUMP
    IPADBG("Dumping Table after deleting rule\n");
    ipa_nat_dump_ipv4_table(tbl_hdl);
#endif

    if (pthread_mutex_unlock(&nat_mutex) != 0) {
        ret = -1;
        goto mutex_unlock_error;
    }

    return 0;

mutex_lock_error:
    IPAERR("unable to lock the nat mutex\n");
    return ret;

mutex_unlock_error:
    IPAERR("unable to unlock the nat mutex\n");

fail:
    return ret;
}

void ReorderCmds(struct ipa_ioc_nat_dma_cmd *cmd, int size)
{
    int indx_tbl_start = 0, cnt, cnt1;
    struct ipa_ioc_nat_dma_cmd *tmp;

    IPADBG("called ReorderCmds() with entries: %d\n", cmd->entries);

    for (cnt = 0; cnt < cmd->entries; cnt++) {
        if (cmd->dma[cnt].base_addr == IPA_NAT_INDX_TBL ||
            cmd->dma[cnt].base_addr == IPA_NAT_INDEX_EXPN_TBL) {
            indx_tbl_start = cnt;
            break;
        }
    }

    if (indx_tbl_start == 0) {
        IPADBG("Reorder not needed\n");
        return;
    }

    tmp = (struct ipa_ioc_nat_dma_cmd *)malloc(size);
    if (tmp == NULL) {
        IPAERR("unable to allocate memory\n");
        return;
    }

    cnt1 = 0;
    tmp->entries = cmd->entries;
    for (cnt = indx_tbl_start; cnt < cmd->entries; cnt++) {
        tmp->dma[cnt1] = cmd->dma[cnt];
        cnt1++;
    }

    for (cnt = 0; cnt < indx_tbl_start; cnt++) {
        tmp->dma[cnt1] = cmd->dma[cnt];
        cnt1++;
    }

    memset(cmd, 0, size);
    memcpy(cmd, tmp, size);
    free(tmp);

    return;
}
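
/*
 * Illustrative note (not part of the original driver): ReorderCmds()
 * rotates the DMA list so the index-table updates come first. E.g. a
 * 4-entry list built as
 *
 *   [ BASE_TBL, EXPN_TBL, INDX_TBL, INDEX_EXPN_TBL ]
 *
 * is rewritten as
 *
 *   [ INDX_TBL, INDEX_EXPN_TBL, BASE_TBL, EXPN_TBL ]
 *
 * moving the index-table side of a deletion ahead of the
 * base-table side.
 */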
1731
ipa_nati_post_del_dma_cmd(uint8_t tbl_indx,uint16_t cur_tbl_entry,uint8_t expn_tbl,del_type rule_pos)1732 int ipa_nati_post_del_dma_cmd(uint8_t tbl_indx,
1733 uint16_t cur_tbl_entry,
1734 uint8_t expn_tbl,
1735 del_type rule_pos)
1736 {
1737
1738 #define MAX_DMA_ENTRIES_FOR_DEL 3
1739
1740 struct ipa_nat_ip4_table_cache *cache_ptr;
1741 struct ipa_nat_indx_tbl_rule *indx_tbl_ptr;
1742 struct ipa_nat_rule *tbl_ptr;
1743 int ret = 0, size = 0;
1744
1745 uint16_t indx_tbl_entry = IPA_NAT_INVALID_NAT_ENTRY;
1746 del_type indx_rule_pos;
1747
1748 struct ipa_ioc_nat_dma_cmd *cmd;
1749 uint8_t no_of_cmds = 0;
1750
1751 uint16_t prev_entry = IPA_NAT_INVALID_NAT_ENTRY;
1752 uint16_t next_entry = IPA_NAT_INVALID_NAT_ENTRY;
1753 uint16_t indx_next_entry = IPA_NAT_INVALID_NAT_ENTRY;
1754 uint16_t indx_next_next_entry = IPA_NAT_INVALID_NAT_ENTRY;
1755 uint16_t table_entry;
1756
1757 size = sizeof(struct ipa_ioc_nat_dma_cmd)+
1758 (MAX_DMA_ENTRIES_FOR_DEL * sizeof(struct ipa_ioc_nat_dma_one));
1759
1760 cmd = (struct ipa_ioc_nat_dma_cmd *)malloc(size);
1761 if (NULL == cmd) {
1762 IPAERR("unable to allocate memory\n");
1763 return -ENOMEM;
1764 }

	cache_ptr = &ipv4_nat_cache.ip4_tbl[tbl_indx];
	if (!expn_tbl) {
		tbl_ptr = (struct ipa_nat_rule *)cache_ptr->ipv4_rules_addr;
	} else {
		tbl_ptr = (struct ipa_nat_rule *)cache_ptr->ipv4_expn_rules_addr;
	}

	if (!Read16BitFieldValue(tbl_ptr[cur_tbl_entry].ip_cksm_enbl,
			ENABLE_FIELD)) {
		IPAERR("Deleting invalid (not enabled) rule\n");
		ret = -EINVAL;
		goto fail;
	}

	indx_tbl_entry =
		Read16BitFieldValue(tbl_ptr[cur_tbl_entry].sw_spec_params,
			SW_SPEC_PARAM_INDX_TBL_ENTRY_FIELD);

	/* ================================================
		 Base Table rule Deletion
	   ================================================*/
	/* Just delete the current rule by disabling the flag field */
	if (IPA_NAT_DEL_TYPE_ONLY_ONE == rule_pos) {
		cmd->dma[no_of_cmds].table_index = tbl_indx;
		cmd->dma[no_of_cmds].base_addr = IPA_NAT_BASE_TBL;
		cmd->dma[no_of_cmds].data = IPA_NAT_FLAG_DISABLE_BIT_MASK;

		cmd->dma[no_of_cmds].offset =
			ipa_nati_get_entry_offset(cache_ptr,
				cmd->dma[no_of_cmds].base_addr,
				cur_tbl_entry);
		cmd->dma[no_of_cmds].offset += IPA_NAT_RULE_FLAG_FIELD_OFFSET;
	}

	/* Just update the protocol field to invalid */
	else if (IPA_NAT_DEL_TYPE_HEAD == rule_pos) {
		cmd->dma[no_of_cmds].table_index = tbl_indx;
		cmd->dma[no_of_cmds].base_addr = IPA_NAT_BASE_TBL;
		cmd->dma[no_of_cmds].data = IPA_NAT_INVALID_PROTO_FIELD_VALUE;

		cmd->dma[no_of_cmds].offset =
			ipa_nati_get_entry_offset(cache_ptr,
				cmd->dma[no_of_cmds].base_addr,
				cur_tbl_entry);
		cmd->dma[no_of_cmds].offset += IPA_NAT_RULE_PROTO_FIELD_OFFSET;

		IPADBG("writing invalid proto: 0x%x\n", cmd->dma[no_of_cmds].data);
	}

	/*
	   Update the next_index field of the previous entry
	   with the next_index field value of the current entry
	*/
	else if (IPA_NAT_DEL_TYPE_MIDDLE == rule_pos) {
		prev_entry =
			Read16BitFieldValue(tbl_ptr[cur_tbl_entry].sw_spec_params,
				SW_SPEC_PARAM_PREV_INDEX_FIELD);

		cmd->dma[no_of_cmds].table_index = tbl_indx;
		cmd->dma[no_of_cmds].data =
			Read16BitFieldValue(tbl_ptr[cur_tbl_entry].nxt_indx_pub_port,
				NEXT_INDEX_FIELD);

		cmd->dma[no_of_cmds].base_addr = IPA_NAT_BASE_TBL;
		if (prev_entry >= cache_ptr->table_entries) {
			cmd->dma[no_of_cmds].base_addr = IPA_NAT_EXPN_TBL;
			prev_entry -= cache_ptr->table_entries;
		}

		cmd->dma[no_of_cmds].offset =
			ipa_nati_get_entry_offset(cache_ptr,
				cmd->dma[no_of_cmds].base_addr, prev_entry);

		cmd->dma[no_of_cmds].offset += IPA_NAT_RULE_NEXT_FIELD_OFFSET;
	}

	/*
	   Reset the next_index field of the previous entry to 0
	*/
	else if (IPA_NAT_DEL_TYPE_LAST == rule_pos) {
		prev_entry =
			Read16BitFieldValue(tbl_ptr[cur_tbl_entry].sw_spec_params,
				SW_SPEC_PARAM_PREV_INDEX_FIELD);

		cmd->dma[no_of_cmds].table_index = tbl_indx;
		cmd->dma[no_of_cmds].data = IPA_NAT_INVALID_NAT_ENTRY;

		cmd->dma[no_of_cmds].base_addr = IPA_NAT_BASE_TBL;
		if (prev_entry >= cache_ptr->table_entries) {
			cmd->dma[no_of_cmds].base_addr = IPA_NAT_EXPN_TBL;
			prev_entry -= cache_ptr->table_entries;
		}

		cmd->dma[no_of_cmds].offset =
			ipa_nati_get_entry_offset(cache_ptr,
				cmd->dma[no_of_cmds].base_addr, prev_entry);

		cmd->dma[no_of_cmds].offset += IPA_NAT_RULE_NEXT_FIELD_OFFSET;
	}

	/* ================================================
		 Base Table rule Deletion End
	   ================================================*/

	/* ================================================
		 Index Table rule Deletion
	   ================================================*/
	ipa_nati_find_index_rule_pos(cache_ptr,
		indx_tbl_entry,
		&indx_rule_pos);
	IPADBG("Index table entry: 0x%x\n", indx_tbl_entry);
	IPADBG("and position: %d\n", indx_rule_pos);
	if (indx_tbl_entry >= cache_ptr->table_entries) {
		indx_tbl_entry -= cache_ptr->table_entries;
		indx_tbl_ptr =
			(struct ipa_nat_indx_tbl_rule *)cache_ptr->index_table_expn_addr;
	} else {
		indx_tbl_ptr =
			(struct ipa_nat_indx_tbl_rule *)cache_ptr->index_table_addr;
	}

	/* Just delete the current rule by resetting nat_table_index field to 0 */
	if (IPA_NAT_DEL_TYPE_ONLY_ONE == indx_rule_pos) {
		no_of_cmds++;
		cmd->dma[no_of_cmds].base_addr = IPA_NAT_INDX_TBL;
		cmd->dma[no_of_cmds].table_index = tbl_indx;
		cmd->dma[no_of_cmds].data = IPA_NAT_INVALID_NAT_ENTRY;

		cmd->dma[no_of_cmds].offset =
			ipa_nati_get_index_entry_offset(cache_ptr,
				cmd->dma[no_of_cmds].base_addr,
				indx_tbl_entry);

		cmd->dma[no_of_cmds].offset +=
			IPA_NAT_INDEX_RULE_NAT_INDEX_FIELD_OFFSET;
	}

	/* Copy the next entry values to the current entry */
	else if (IPA_NAT_DEL_TYPE_HEAD == indx_rule_pos) {
		next_entry =
			Read16BitFieldValue(indx_tbl_ptr[indx_tbl_entry].tbl_entry_nxt_indx,
				INDX_TBL_NEXT_INDEX_FILED);

		next_entry -= cache_ptr->table_entries;

		no_of_cmds++;
		cmd->dma[no_of_cmds].base_addr = IPA_NAT_INDX_TBL;
		cmd->dma[no_of_cmds].table_index = tbl_indx;

		/* Copy the nat_table_index field value of the next entry */
		indx_tbl_ptr =
			(struct ipa_nat_indx_tbl_rule *)cache_ptr->index_table_expn_addr;
		cmd->dma[no_of_cmds].data =
			Read16BitFieldValue(indx_tbl_ptr[next_entry].tbl_entry_nxt_indx,
				INDX_TBL_TBL_ENTRY_FIELD);

		cmd->dma[no_of_cmds].offset =
			ipa_nati_get_index_entry_offset(cache_ptr,
				cmd->dma[no_of_cmds].base_addr,
				indx_tbl_entry);

		cmd->dma[no_of_cmds].offset +=
			IPA_NAT_INDEX_RULE_NAT_INDEX_FIELD_OFFSET;

		/* Copy the next_index field value of the next entry */
		no_of_cmds++;
		cmd->dma[no_of_cmds].base_addr = IPA_NAT_INDX_TBL;
		cmd->dma[no_of_cmds].table_index = tbl_indx;
		cmd->dma[no_of_cmds].data =
			Read16BitFieldValue(indx_tbl_ptr[next_entry].tbl_entry_nxt_indx,
				INDX_TBL_NEXT_INDEX_FILED);

		cmd->dma[no_of_cmds].offset =
			ipa_nati_get_index_entry_offset(cache_ptr,
				cmd->dma[no_of_cmds].base_addr, indx_tbl_entry);

		cmd->dma[no_of_cmds].offset +=
			IPA_NAT_INDEX_RULE_NEXT_FIELD_OFFSET;
		indx_next_entry = next_entry;
	}

	/*
	   Update the next_index field of the previous entry
	   with the next_index field value of the current entry
	*/
	else if (IPA_NAT_DEL_TYPE_MIDDLE == indx_rule_pos) {
		prev_entry = cache_ptr->index_expn_table_meta[indx_tbl_entry].prev_index;

		no_of_cmds++;
		cmd->dma[no_of_cmds].table_index = tbl_indx;
		cmd->dma[no_of_cmds].data =
			Read16BitFieldValue(indx_tbl_ptr[indx_tbl_entry].tbl_entry_nxt_indx,
				INDX_TBL_NEXT_INDEX_FILED);

		cmd->dma[no_of_cmds].base_addr = IPA_NAT_INDX_TBL;
		if (prev_entry >= cache_ptr->table_entries) {
			cmd->dma[no_of_cmds].base_addr = IPA_NAT_INDEX_EXPN_TBL;
			prev_entry -= cache_ptr->table_entries;
		}

		IPADBG("prev_entry: %d update with cur next_index: %d\n",
			prev_entry, cmd->dma[no_of_cmds].data);
		IPADBG("prev_entry: %d exists in table_type:%d\n",
			prev_entry, cmd->dma[no_of_cmds].base_addr);

		cmd->dma[no_of_cmds].offset =
			ipa_nati_get_index_entry_offset(cache_ptr,
				cmd->dma[no_of_cmds].base_addr, prev_entry);

		cmd->dma[no_of_cmds].offset +=
			IPA_NAT_INDEX_RULE_NEXT_FIELD_OFFSET;
	}

	/* Reset the next_index field of the previous entry to 0 */
	else if (IPA_NAT_DEL_TYPE_LAST == indx_rule_pos) {
		prev_entry = cache_ptr->index_expn_table_meta[indx_tbl_entry].prev_index;

		no_of_cmds++;
		cmd->dma[no_of_cmds].table_index = tbl_indx;
		cmd->dma[no_of_cmds].data = IPA_NAT_INVALID_NAT_ENTRY;

		cmd->dma[no_of_cmds].base_addr = IPA_NAT_INDX_TBL;
		if (prev_entry >= cache_ptr->table_entries) {
			cmd->dma[no_of_cmds].base_addr = IPA_NAT_INDEX_EXPN_TBL;
			prev_entry -= cache_ptr->table_entries;
		}

		IPADBG("Resetting prev_entry: %d next_index\n", prev_entry);
		IPADBG("prev_entry: %d exists in table_type:%d\n",
			prev_entry, cmd->dma[no_of_cmds].base_addr);

		cmd->dma[no_of_cmds].offset =
			ipa_nati_get_index_entry_offset(cache_ptr,
				cmd->dma[no_of_cmds].base_addr, prev_entry);

		cmd->dma[no_of_cmds].offset +=
			IPA_NAT_INDEX_RULE_NEXT_FIELD_OFFSET;
	}

	/* ================================================
		 Index Table rule Deletion End
	   ================================================*/
	cmd->entries = no_of_cmds + 1;

	if (cmd->entries > 1) {
		ReorderCmds(cmd, size);
	}
	if (ioctl(ipv4_nat_cache.ipa_fd, IPA_IOC_NAT_DMA, cmd)) {
		perror("ipa_nati_post_del_dma_cmd(): ioctl error value");
		IPAERR("unable to post cmd\n");
		IPADBG("ipa fd %d\n", ipv4_nat_cache.ipa_fd);
		ret = -EIO;
		goto fail;
	}

	/* If the entry is in the IPA_NAT_DEL_TYPE_MIDDLE of the list,
	   update the previous entry in the sw specific parameters */
	if (IPA_NAT_DEL_TYPE_MIDDLE == rule_pos) {
		/* Retrieve the prev_entry value of the current entry */
		prev_entry =
			Read16BitFieldValue(tbl_ptr[cur_tbl_entry].sw_spec_params,
				SW_SPEC_PARAM_PREV_INDEX_FIELD);

		/* Retrieve the next entry */
		next_entry =
			Read16BitFieldValue(tbl_ptr[cur_tbl_entry].nxt_indx_pub_port,
				NEXT_INDEX_FIELD);

		next_entry -= cache_ptr->table_entries;
		tbl_ptr = (struct ipa_nat_rule *)cache_ptr->ipv4_expn_rules_addr;

		/* Copy the prev_entry value of the current entry to the next entry */
		UpdateSwSpecParams(&tbl_ptr[next_entry],
			IPA_NAT_SW_PARAM_PREV_INDX_BYTE,
			prev_entry);
	}

	/* Reset the other field values of the current delete entry.
	   In case of IPA_NAT_DEL_TYPE_HEAD, don't reset */
	if (IPA_NAT_DEL_TYPE_HEAD != rule_pos) {
		memset(&tbl_ptr[cur_tbl_entry], 0, sizeof(struct ipa_nat_rule));
	}

	if (indx_rule_pos == IPA_NAT_DEL_TYPE_HEAD) {

		/* Update the previous value of the next next entry to the
		   current entry, as we moved the next entry values
		   to the current entry */
		indx_next_next_entry =
			Read16BitFieldValue(indx_tbl_ptr[indx_next_entry].tbl_entry_nxt_indx,
				INDX_TBL_NEXT_INDEX_FILED);

		if (indx_next_next_entry != 0 &&
			indx_next_next_entry >= cache_ptr->table_entries) {

			IPADBG("Next Next entry: %d\n", indx_next_next_entry);
			indx_next_next_entry -= cache_ptr->table_entries;

			IPADBG("Updating entry: %d prev index to: %d\n",
				indx_next_next_entry, indx_tbl_entry);
			cache_ptr->index_expn_table_meta[indx_next_next_entry].prev_index =
				indx_tbl_entry;
		}

		/* Now reset the next entry, as we copied
		   the next entry to the current entry */
		IPADBG("Resetting, index table entry(Proper): %d\n",
			(cache_ptr->table_entries + indx_next_entry));

		/* This resets both table entry and next index values */
		indx_tbl_ptr[indx_next_entry].tbl_entry_nxt_indx = 0;

		/*
		   In case of IPA_NAT_DEL_TYPE_HEAD, update the sw specific parameters
		   (index table entry) of the base table entry
		*/
		indx_tbl_ptr =
			(struct ipa_nat_indx_tbl_rule *)cache_ptr->index_table_addr;
		table_entry =
			Read16BitFieldValue(indx_tbl_ptr[indx_tbl_entry].tbl_entry_nxt_indx,
				INDX_TBL_TBL_ENTRY_FIELD);

		if (table_entry >= cache_ptr->table_entries) {
			tbl_ptr = (struct ipa_nat_rule *)cache_ptr->ipv4_expn_rules_addr;
			table_entry -= cache_ptr->table_entries;
		} else {
			tbl_ptr = (struct ipa_nat_rule *)cache_ptr->ipv4_rules_addr;
		}

		UpdateSwSpecParams(&tbl_ptr[table_entry],
			IPA_NAT_SW_PARAM_INDX_TBL_ENTRY_BYTE,
			indx_tbl_entry);
	} else {
		/* Update the prev_entry value (in index_expn_table_meta)
		   of the next_entry in the list with the prev_entry value
		   of the current entry */
		if (IPA_NAT_DEL_TYPE_MIDDLE == indx_rule_pos) {
			next_entry =
				Read16BitFieldValue(indx_tbl_ptr[indx_tbl_entry].tbl_entry_nxt_indx,
					INDX_TBL_NEXT_INDEX_FILED);

			if (next_entry >= cache_ptr->table_entries) {
				next_entry -= cache_ptr->table_entries;
			}

			cache_ptr->index_expn_table_meta[next_entry].prev_index =
				cache_ptr->index_expn_table_meta[indx_tbl_entry].prev_index;

			cache_ptr->index_expn_table_meta[indx_tbl_entry].prev_index =
				IPA_NAT_INVALID_NAT_ENTRY;
		}

		IPADBG("At, indx_tbl_entry value: %d\n", indx_tbl_entry);
		IPADBG("At, indx_tbl_entry member address: %p\n",
			&indx_tbl_ptr[indx_tbl_entry].tbl_entry_nxt_indx);

		indx_tbl_ptr[indx_tbl_entry].tbl_entry_nxt_indx = 0;
	}

fail:
	free(cmd);

	return ret;
}

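/**
 * ipa_nati_find_index_rule_pos() - finds the position of an index table entry
 * @cache_ptr: [in] sw cache of the nat table
 * @tbl_entry: [in] index table entry to locate
 * @rule_pos: [out] position of the entry within its list
 *
 * Entries in the base index table are either alone (ONLY_ONE) or the
 * head of a list (HEAD); entries in the index expansion table are
 * either in the middle (MIDDLE) or at the end (LAST) of a list,
 * depending on whether their next_index field is set.
 *
 * Returns: None
 */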
void ipa_nati_find_index_rule_pos(
	struct ipa_nat_ip4_table_cache *cache_ptr,
	uint16_t tbl_entry,
	del_type *rule_pos)
{
	struct ipa_nat_indx_tbl_rule *tbl_ptr;

	if (tbl_entry >= cache_ptr->table_entries) {
		tbl_ptr =
			(struct ipa_nat_indx_tbl_rule *)cache_ptr->index_table_expn_addr;

		tbl_entry -= cache_ptr->table_entries;
		if (Read16BitFieldValue(tbl_ptr[tbl_entry].tbl_entry_nxt_indx,
				INDX_TBL_NEXT_INDEX_FILED) == IPA_NAT_INVALID_NAT_ENTRY) {
			*rule_pos = IPA_NAT_DEL_TYPE_LAST;
		} else {
			*rule_pos = IPA_NAT_DEL_TYPE_MIDDLE;
		}
	} else {
		tbl_ptr =
			(struct ipa_nat_indx_tbl_rule *)cache_ptr->index_table_addr;

		if (Read16BitFieldValue(tbl_ptr[tbl_entry].tbl_entry_nxt_indx,
				INDX_TBL_NEXT_INDEX_FILED) == IPA_NAT_INVALID_NAT_ENTRY) {
			*rule_pos = IPA_NAT_DEL_TYPE_ONLY_ONE;
		} else {
			*rule_pos = IPA_NAT_DEL_TYPE_HEAD;
		}
	}
}

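/**
 * ipa_nati_find_rule_pos() - finds the position of a base table entry
 * @cache_ptr: [in] sw cache of the nat table
 * @expn_tbl: [in] non-zero if the entry resides in the expansion table
 * @tbl_entry: [in] base table entry to locate
 * @rule_pos: [out] position of the entry within its list
 *
 * Same classification as ipa_nati_find_index_rule_pos(), applied to
 * the ipv4 rules tables instead of the index tables.
 *
 * Returns: None
 */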
void ipa_nati_find_rule_pos(struct ipa_nat_ip4_table_cache *cache_ptr,
	uint8_t expn_tbl,
	uint16_t tbl_entry,
	del_type *rule_pos)
{
	struct ipa_nat_rule *tbl_ptr;

	if (expn_tbl) {
		tbl_ptr = (struct ipa_nat_rule *)cache_ptr->ipv4_expn_rules_addr;
		if (Read16BitFieldValue(tbl_ptr[tbl_entry].nxt_indx_pub_port,
				NEXT_INDEX_FIELD) == IPA_NAT_INVALID_NAT_ENTRY) {
			*rule_pos = IPA_NAT_DEL_TYPE_LAST;
		} else {
			*rule_pos = IPA_NAT_DEL_TYPE_MIDDLE;
		}
	} else {
		tbl_ptr = (struct ipa_nat_rule *)cache_ptr->ipv4_rules_addr;
		if (Read16BitFieldValue(tbl_ptr[tbl_entry].nxt_indx_pub_port,
				NEXT_INDEX_FIELD) == IPA_NAT_INVALID_NAT_ENTRY) {
			*rule_pos = IPA_NAT_DEL_TYPE_ONLY_ONE;
		} else {
			*rule_pos = IPA_NAT_DEL_TYPE_HEAD;
		}
	}
}

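/**
 * ipa_nati_del_dead_ipv4_head_nodes() - clears dead head entries in sw cache
 * @tbl_indx: [in] nat table index
 *
 * Head entries deleted with IPA_NAT_DEL_TYPE_HEAD are only marked
 * with an invalid protocol. Once such an entry also has no
 * next_index (its list has drained), its sw copy can be zeroed.
 *
 * Returns: None
 */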
void ipa_nati_del_dead_ipv4_head_nodes(uint8_t tbl_indx)
{
	struct ipa_nat_rule *tbl_ptr;
	uint16_t cnt;

	tbl_ptr =
		(struct ipa_nat_rule *)ipv4_nat_cache.ip4_tbl[tbl_indx].ipv4_rules_addr;

	for (cnt = 0;
		 cnt < ipv4_nat_cache.ip4_tbl[tbl_indx].table_entries;
		 cnt++) {

		if (Read8BitFieldValue(tbl_ptr[cnt].ts_proto,
				PROTOCOL_FIELD) == IPA_NAT_INVALID_PROTO_FIELD_CMP
			&&
			Read16BitFieldValue(tbl_ptr[cnt].nxt_indx_pub_port,
				NEXT_INDEX_FIELD) == IPA_NAT_INVALID_NAT_ENTRY) {
			/* Delete the IPA_NAT_DEL_TYPE_HEAD node */
			IPADBG("deleting the dead node 0x%x\n", cnt);
			memset(&tbl_ptr[cnt], 0, sizeof(struct ipa_nat_rule));
		}
	} /* end of for loop */

	return;
}

/* ========================================================
	Debug functions
   ========================================================*/
#ifdef NAT_DUMP
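/**
 * ipa_nat_dump_ipv4_table() - dumps the sw cache of a nat table
 * @tbl_hdl: [in] nat table handle (1 based)
 *
 * Prints every enabled rule in the base, expansion, index and
 * index expansion tables of the given table handle.
 *
 * Returns: None
 */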
void ipa_nat_dump_ipv4_table(uint32_t tbl_hdl)
{
	struct ipa_nat_rule *tbl_ptr;
	struct ipa_nat_indx_tbl_rule *indx_tbl_ptr;
	int cnt;
	uint8_t atl_one = 0;

	if (IPA_NAT_INVALID_NAT_ENTRY == tbl_hdl ||
		tbl_hdl > IPA_NAT_MAX_IP4_TBLS) {
		IPAERR("invalid table handle passed\n");
		return;
	}

	/* Print ipv4 rules */
	IPADBG("Dumping ipv4 active rules:\n");
	tbl_ptr = (struct ipa_nat_rule *)
		ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].ipv4_rules_addr;
	for (cnt = 0;
		 cnt < ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].table_entries;
		 cnt++) {
		if (Read16BitFieldValue(tbl_ptr[cnt].ip_cksm_enbl,
				ENABLE_FIELD)) {
			atl_one = 1;
			ipa_nati_print_rule(&tbl_ptr[cnt], cnt);
		}
	}
	if (!atl_one) {
		IPADBG("No active base rules, total: %d\n",
			ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].table_entries);
	}
	atl_one = 0;

	/* Print ipv4 expansion rules */
	IPADBG("Dumping ipv4 active expansion rules:\n");
	tbl_ptr = (struct ipa_nat_rule *)
		ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].ipv4_expn_rules_addr;
	for (cnt = 0;
		 cnt < ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].expn_table_entries;
		 cnt++) {
		if (Read16BitFieldValue(tbl_ptr[cnt].ip_cksm_enbl,
				ENABLE_FIELD)) {
			atl_one = 1;
			ipa_nati_print_rule(&tbl_ptr[cnt],
				(cnt + ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].table_entries));
		}
	}
	if (!atl_one) {
		IPADBG("No active base expansion rules, total: %d\n",
			ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].expn_table_entries);
	}
	atl_one = 0;

	/* Print ipv4 index rules */
	IPADBG("Dumping ipv4 index active rules:\n");
	indx_tbl_ptr = (struct ipa_nat_indx_tbl_rule *)
		ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].index_table_addr;
	for (cnt = 0;
		 cnt < ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].table_entries;
		 cnt++) {
		if (Read16BitFieldValue(indx_tbl_ptr[cnt].tbl_entry_nxt_indx,
				INDX_TBL_TBL_ENTRY_FIELD)) {
			atl_one = 1;
			ipa_nati_print_index_rule(&indx_tbl_ptr[cnt], cnt, 0);
		}
	}
	if (!atl_one) {
		IPADBG("No active index table rules, total:%d\n",
			ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].table_entries);
	}
	atl_one = 0;

	/* Print ipv4 index expansion rules */
	IPADBG("Dumping ipv4 index expansion active rules:\n");
	indx_tbl_ptr = (struct ipa_nat_indx_tbl_rule *)
		ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].index_table_expn_addr;
	for (cnt = 0;
		 cnt < ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].expn_table_entries;
		 cnt++) {
		if (Read16BitFieldValue(indx_tbl_ptr[cnt].tbl_entry_nxt_indx,
				INDX_TBL_TBL_ENTRY_FIELD)) {
			atl_one = 1;
			ipa_nati_print_index_rule(&indx_tbl_ptr[cnt],
				(cnt + ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].table_entries),
				ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].index_expn_table_meta[cnt].prev_index);
		}
	}
	if (!atl_one) {
		IPADBG("No active index expansion rules, total:%d\n",
			ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].expn_table_entries);
	}
	atl_one = 0;
}

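/**
 * ipa_nati_print_rule() - prints a single base table rule
 * @param: [in] rule to print
 * @rule_id: [in] entry number of the rule within the table
 *
 * Returns: None
 */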
void ipa_nati_print_rule(
	struct ipa_nat_rule *param,
	uint32_t rule_id)
{
	struct ipa_nat_sw_rule sw_rule;
	uint32_t ip_addr;

	memcpy(&sw_rule, param, sizeof(sw_rule));

	IPADUMP("rule-id:%d ", rule_id);
	ip_addr = sw_rule.target_ip;
	IPADUMP("Trgt-IP:%d.%d.%d.%d ",
		((ip_addr & 0xFF000000) >> 24), ((ip_addr & 0x00FF0000) >> 16),
		((ip_addr & 0x0000FF00) >> 8), (ip_addr & 0x000000FF));

	IPADUMP("Trgt-Port:%d Priv-Port:%d ", sw_rule.target_port, sw_rule.private_port);

	ip_addr = sw_rule.private_ip;
	IPADUMP("Priv-IP:%d.%d.%d.%d ",
		((ip_addr & 0xFF000000) >> 24), ((ip_addr & 0x00FF0000) >> 16),
		((ip_addr & 0x0000FF00) >> 8), (ip_addr & 0x000000FF));

	IPADUMP("Pub-Port:%d Nxt-indx:%d ", sw_rule.public_port, sw_rule.next_index);
	IPADUMP("IP-cksm-delta:0x%x En-bit:0x%x ", sw_rule.ip_chksum, sw_rule.enable);
	IPADUMP("TS:0x%x Proto:0x%x ", sw_rule.time_stamp, sw_rule.protocol);
	IPADUMP("Prv-indx:%d indx_tbl_entry:%d ", sw_rule.prev_index, sw_rule.indx_tbl_entry);
	IPADUMP("Tcp-udp-cksum-delta:0x%x", sw_rule.tcp_udp_chksum);
	IPADUMP("\n");
	return;
}

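/**
 * ipa_nati_print_index_rule() - prints a single index table rule
 * @param: [in] index rule to print
 * @rule_id: [in] entry number of the rule within the table
 * @prev_indx: [in] previous-entry metadata to print alongside
 *
 * Returns: None
 */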
void ipa_nati_print_index_rule(
	struct ipa_nat_indx_tbl_rule *param,
	uint32_t rule_id, uint16_t prev_indx)
{
	struct ipa_nat_sw_indx_tbl_rule sw_rule;
	memcpy(&sw_rule, param, sizeof(sw_rule));

	IPADUMP("rule-id:%d Table_entry:%d Next_index:%d, prev_indx:%d",
		rule_id, sw_rule.tbl_entry, sw_rule.next_index, prev_indx);
	IPADUMP("\n");
	return;
}

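/**
 * ipa_nati_query_nat_rules() - counts the active rules of a table
 * @tbl_hdl: [in] nat table handle (1 based)
 * @tbl_type: [in] which of the four tables to count
 *
 * Walks the requested table (base, expansion, index or index
 * expansion) in the sw cache and counts entries whose enable bit
 * (base tables) or table-entry field (index tables) is set.
 *
 * Returns: number of active rules, 0 on invalid handle
 */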
int ipa_nati_query_nat_rules(
	uint32_t tbl_hdl,
	nat_table_type tbl_type)
{
	struct ipa_nat_rule *tbl_ptr;
	struct ipa_nat_indx_tbl_rule *indx_tbl_ptr;
	int cnt = 0, ret = 0;

	if (IPA_NAT_INVALID_NAT_ENTRY == tbl_hdl ||
		tbl_hdl > IPA_NAT_MAX_IP4_TBLS) {
		IPAERR("invalid table handle passed\n");
		return ret;
	}

	/* Count ipv4 rules */
	if (tbl_type == IPA_NAT_BASE_TBL) {
		IPADBG("Counting ipv4 active rules:\n");
		tbl_ptr = (struct ipa_nat_rule *)
			ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].ipv4_rules_addr;
		for (cnt = 0;
			 cnt < ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].table_entries;
			 cnt++) {
			if (Read16BitFieldValue(tbl_ptr[cnt].ip_cksm_enbl,
					ENABLE_FIELD)) {
				ret++;
			}
		}
		if (!ret) {
			IPADBG("No active base rules\n");
		}

		IPADBG("Number of active base rules: %d\n", ret);
	}

	/* Count ipv4 expansion rules */
	if (tbl_type == IPA_NAT_EXPN_TBL) {
		IPADBG("Counting ipv4 active expansion rules:\n");
		tbl_ptr = (struct ipa_nat_rule *)
			ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].ipv4_expn_rules_addr;
		for (cnt = 0;
			 cnt < ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].expn_table_entries;
			 cnt++) {
			if (Read16BitFieldValue(tbl_ptr[cnt].ip_cksm_enbl,
					ENABLE_FIELD)) {
				ret++;
			}
		}
		if (!ret) {
			IPADBG("No active base expansion rules\n");
		}

		IPADBG("Number of active base expansion rules: %d\n", ret);
	}

	/* Count ipv4 index rules */
	if (tbl_type == IPA_NAT_INDX_TBL) {
		IPADBG("Counting ipv4 index active rules:\n");
		indx_tbl_ptr = (struct ipa_nat_indx_tbl_rule *)
			ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].index_table_addr;
		for (cnt = 0;
			 cnt < ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].table_entries;
			 cnt++) {
			if (Read16BitFieldValue(indx_tbl_ptr[cnt].tbl_entry_nxt_indx,
					INDX_TBL_TBL_ENTRY_FIELD)) {
				ret++;
			}
		}
		if (!ret) {
			IPADBG("No active index table rules\n");
		}

		IPADBG("Number of active index table rules: %d\n", ret);
	}

	/* Count ipv4 index expansion rules */
	if (tbl_type == IPA_NAT_INDEX_EXPN_TBL) {
		IPADBG("Counting ipv4 index expansion active rules:\n");
		indx_tbl_ptr = (struct ipa_nat_indx_tbl_rule *)
			ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].index_table_expn_addr;
		for (cnt = 0;
			 cnt < ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].expn_table_entries;
			 cnt++) {
			if (Read16BitFieldValue(indx_tbl_ptr[cnt].tbl_entry_nxt_indx,
					INDX_TBL_TBL_ENTRY_FIELD)) {
				ret++;
			}
		}

		if (!ret)
			IPADBG("No active index expansion rules\n");

		IPADBG("Number of active index expansion rules: %d\n", ret);
	}

	return ret;
}
#endif