Lines Matching +full:dma +full:- +full:mem

1 // SPDX-License-Identifier: GPL-2.0
3 /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
4 * Copyright (C) 2018-2020 Linaro Ltd.
15 #include <linux/dma-mapping.h>
30 * The IPA has tables defined in its local (IPA-resident) memory that define
32 * endian 64-bit "slot" that holds the address of a rule definition. (The
33 * size of these slots is 64 bits regardless of the host DMA address size.)
41 * an object (such as a route or filter table) in IPA-resident memory must
42 * be 128-byte aligned. An object in system memory (such as a route or filter
43 * rule) must be at an 8-byte aligned address. We currently only place
46 * A rule consists of a contiguous block of 32-bit values terminated with
52 * not all TX endpoints support filtering. The first 64-bit slot in a
54 * the table. The low-order bit (bit 0) in this bitmap represents a
69 * ----------------------
71 * |--------------------|
72 * 1st endpoint | 0x000123456789abc0 | DMA address for modem endpoint 2 rule
73 * |--------------------|
74 * 2nd endpoint | 0x000123456789abf0 | DMA address for AP endpoint 5 rule
75 * |--------------------|
77 * |--------------------|
79 * |--------------------|
81 * ----------------------
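As a rough sketch of the slot arithmetic implied by the filter table above (not code from ipa_table.c; the example_* helper names are invented here): slot 0 holds the endpoint bitmap, and every following slot is one little-endian 64-bit rule address, so sizing and addressing reduce to multiples of sizeof(__le64).

/* Sketch only: one bitmap slot plus one 64-bit slot per filtering
 * endpoint; slot @n starts n * sizeof(__le64) bytes past the start
 * of the table region.
 */
static u32 example_filter_table_size(u32 rule_count)
{
	return (1 + rule_count) * sizeof(__le64);
}

static u32 example_slot_offset(u32 region_offset, u32 n)
{
	return region_offset + n * sizeof(__le64);
}

The same multiply-by-slot-size pattern shows up later in ipa_table_reset_add() (offset = mem->offset + first * sizeof(__le64)).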
90 * ----------------------
91 * 1st modem route | 0x0001234500001100 | DMA address for first route rule
92 * |--------------------|
93 * 2nd modem route | 0x0001234500001140 | DMA address for second route rule
94 * |--------------------|
96 * |--------------------|
97 * Last modem route| 0x0001234500002280 | DMA address for Nth route rule
98 * |--------------------|
99 * 1st AP route | 0x0001234500001100 | DMA address for route rule (N+1)
100 * |--------------------|
101 * 2nd AP route | 0x0001234500001140 | DMA address for next route rule
102 * |--------------------|
104 * |--------------------|
105 * Last AP route | 0x0001234500002280 | DMA address for last route rule
106 * ----------------------
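The route table above is split between the modem and the AP: the modem owns the first IPA_ROUTE_MODEM_COUNT slots and the AP owns the rest, which is what the IPA_ROUTE_AP_COUNT arithmetic on line 113 below expresses. A minimal sketch of that partition (illustrative only; it restates the check visible in ipa_route_id_modem() further down and assumes the modem range starts at IPA_ROUTE_MODEM_MIN):

/* Sketch only: a route ID belongs to the modem if it falls within the
 * modem-owned range; all remaining route IDs belong to the AP.
 */
static bool example_route_id_is_modem(u32 route_id)
{
	return route_id >= IPA_ROUTE_MODEM_MIN &&
	       route_id < IPA_ROUTE_MODEM_MIN + IPA_ROUTE_MODEM_COUNT;
}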
113 (IPA_ROUTE_COUNT_MAX - IPA_ROUTE_MODEM_COUNT)
115 /* Filter or route rules consist of a set of 32-bit values followed by a
116 * 32-bit all-zero rule list terminator. The "zero rule" is simply an
117 * all-zero rule followed by the list terminator.
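Put concretely (a sketch, not quoted from the file), the zero rule is two consecutive 32-bit zeros: an all-zero rule word immediately followed by an all-zero list terminator, 8 bytes in total.

/* Sketch only: write a zero rule at @rule (illustrative helper). */
static void example_write_zero_rule(__le32 *rule)
{
	rule[0] = cpu_to_le32(0);	/* all-zero rule */
	rule[1] = cpu_to_le32(0);	/* rule list terminator */
}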
126 /* Filter and route tables contain DMA addresses that refer in ipa_table_validate_build()
128 * is 64 bits regardless of what the size of an AP DMA address in ipa_table_validate_build()
136 * It is a 64-bit block of zeroed memory. Code in ipa_table_init() in ipa_table_validate_build()
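The two fragments above come from ipa_table_validate_build(), whose job is to make these layout assumptions fail at compile time. The checks below are an assumed sketch of that idea, not lines quoted from the function:

/* Sketch only: a 64-bit table slot must be able to hold any AP DMA
 * address, and the 64-bit zero rule must be writable through a
 * __le64 pointer.
 */
static void example_validate_build(void)
{
	BUILD_BUG_ON(sizeof(dma_addr_t) > sizeof(__le64));
	BUILD_BUG_ON(sizeof(__le64) != sizeof(u64));
}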
153 struct device *dev = &ipa->pdev->dev; in ipa_table_valid_one()
154 const struct ipa_mem *mem; in ipa_table_valid_one() local
159 mem = hashed ? &ipa->mem[IPA_MEM_V6_ROUTE_HASHED] in ipa_table_valid_one()
160 : &ipa->mem[IPA_MEM_V6_ROUTE]; in ipa_table_valid_one()
162 mem = hashed ? &ipa->mem[IPA_MEM_V4_ROUTE_HASHED] in ipa_table_valid_one()
163 : &ipa->mem[IPA_MEM_V4_ROUTE]; in ipa_table_valid_one()
167 mem = hashed ? &ipa->mem[IPA_MEM_V6_FILTER_HASHED] in ipa_table_valid_one()
168 : &ipa->mem[IPA_MEM_V6_FILTER]; in ipa_table_valid_one()
170 mem = hashed ? &ipa->mem[IPA_MEM_V4_FILTER_HASHED] in ipa_table_valid_one()
171 : &ipa->mem[IPA_MEM_V4_FILTER]; in ipa_table_valid_one()
175 if (!ipa_cmd_table_valid(ipa, mem, route, ipv6, hashed)) in ipa_table_valid_one()
178 /* mem->size >= size is sufficient, but we'll demand more */ in ipa_table_valid_one()
179 if (mem->size == size) in ipa_table_valid_one()
183 if (hashed && !mem->size) in ipa_table_valid_one()
188 route ? "route" : "filter", mem->size, size); in ipa_table_valid_one()
212 struct device *dev = &ipa->pdev->dev; in ipa_filter_map_valid()
253 return ipa->table_addr + skip * sizeof(*ipa->table_virt); in ipa_table_addr()
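ipa_table_addr() returns a DMA address within the coherent table buffer described near the end of this listing: ipa->table_addr is the base of that buffer and skip is the number of leading 64-bit slots to step over. As a worked example with 8-byte slots, skip = 2 yields ipa->table_addr + 16, the DMA address of the buffer's third slot.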
257 u16 first, u16 count, const struct ipa_mem *mem) in ipa_table_reset_add() argument
259 struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi); in ipa_table_reset_add()
265 if (!mem->size) in ipa_table_reset_add()
271 offset = mem->offset + first * sizeof(__le64); in ipa_table_reset_add()
280 * for the IPv4 and IPv6 non-hashed and hashed filter tables.
283 ipa_filter_reset_table(struct ipa *ipa, const struct ipa_mem *mem, bool modem) in ipa_filter_reset_table() argument
285 u32 ep_mask = ipa->filter_map; in ipa_filter_reset_table()
290 if (!mem->size) in ipa_filter_reset_table()
295 dev_err(&ipa->pdev->dev, in ipa_filter_reset_table()
298 return -EBUSY; in ipa_filter_reset_table()
308 endpoint = &ipa->endpoint[endpoint_id]; in ipa_filter_reset_table()
309 if (endpoint->ee_id != ee_id) in ipa_filter_reset_table()
312 ipa_table_reset_add(trans, true, endpoint_id, 1, mem); in ipa_filter_reset_table()
328 ret = ipa_filter_reset_table(ipa, &ipa->mem[IPA_MEM_V4_FILTER], modem); in ipa_filter_reset()
332 ret = ipa_filter_reset_table(ipa, &ipa->mem[IPA_MEM_V4_FILTER_HASHED], in ipa_filter_reset()
337 ret = ipa_filter_reset_table(ipa, &ipa->mem[IPA_MEM_V6_FILTER], modem); in ipa_filter_reset()
340 ret = ipa_filter_reset_table(ipa, &ipa->mem[IPA_MEM_V6_FILTER_HASHED], in ipa_filter_reset()
348 * won't exceed the per-transaction command limit.
358 dev_err(&ipa->pdev->dev, in ipa_route_reset()
361 return -EBUSY; in ipa_route_reset()
373 &ipa->mem[IPA_MEM_V4_ROUTE]); in ipa_route_reset()
375 &ipa->mem[IPA_MEM_V4_ROUTE_HASHED]); in ipa_route_reset()
378 &ipa->mem[IPA_MEM_V6_ROUTE]); in ipa_route_reset()
380 &ipa->mem[IPA_MEM_V6_ROUTE_HASHED]); in ipa_route_reset()
389 struct device *dev = &ipa->pdev->dev; in ipa_table_reset()
409 u32 offset = ipa_reg_filt_rout_hash_flush_offset(ipa->version); in ipa_table_hash_flush()
414 if (ipa->version == IPA_VERSION_4_2) in ipa_table_hash_flush()
419 dev_err(&ipa->pdev->dev, "no transaction for hash flush\n"); in ipa_table_hash_flush()
420 return -EBUSY; in ipa_table_hash_flush()
435 const struct ipa_mem *mem, in ipa_table_init_add() argument
438 struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi); in ipa_table_init_add()
447 * in the filter table. The hashed and non-hashed filter table in ipa_table_init_add()
453 count = 1 + hweight32(ipa->filter_map); in ipa_table_init_add()
454 hash_count = hash_mem->size ? count : 0; in ipa_table_init_add()
456 count = mem->size / sizeof(__le64); in ipa_table_init_add()
457 hash_count = hash_mem->size / sizeof(__le64); in ipa_table_init_add()
465 ipa_cmd_table_init_add(trans, opcode, size, mem->offset, addr, in ipa_table_init_add()
466 hash_size, hash_mem->offset, hash_addr); in ipa_table_init_add()
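A worked example of the sizing above (the values are invented for illustration): a filter_map of 0x3f has six bits set, so a filter table needs count = 1 + hweight32(0x3f) = 7 slots, which with 8-byte slots amounts to 7 * sizeof(__le64) = 56 bytes of table data written starting at mem->offset. If the hashed region has zero size, hash_count is zero and no hashed table is written.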
475 dev_err(&ipa->pdev->dev, "no transaction for table setup\n"); in ipa_table_setup()
476 return -EBUSY; in ipa_table_setup()
480 &ipa->mem[IPA_MEM_V4_ROUTE], in ipa_table_setup()
481 &ipa->mem[IPA_MEM_V4_ROUTE_HASHED]); in ipa_table_setup()
484 &ipa->mem[IPA_MEM_V6_ROUTE], in ipa_table_setup()
485 &ipa->mem[IPA_MEM_V6_ROUTE_HASHED]); in ipa_table_setup()
488 &ipa->mem[IPA_MEM_V4_FILTER], in ipa_table_setup()
489 &ipa->mem[IPA_MEM_V4_FILTER_HASHED]); in ipa_table_setup()
492 &ipa->mem[IPA_MEM_V6_FILTER], in ipa_table_setup()
493 &ipa->mem[IPA_MEM_V6_FILTER_HASHED]); in ipa_table_setup()
506 * ipa_filter_tuple_zero() - Zero an endpoint's hashed filter tuple
514 u32 endpoint_id = endpoint->endpoint_id; in ipa_filter_tuple_zero()
520 val = ioread32(endpoint->ipa->reg_virt + offset); in ipa_filter_tuple_zero()
522 /* Zero all filter-related fields, preserving the rest */ in ipa_filter_tuple_zero()
525 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_filter_tuple_zero()
531 u32 ep_mask = ipa->filter_map; in ipa_filter_config()
534 if (ipa->version == IPA_VERSION_4_2) in ipa_filter_config()
543 endpoint = &ipa->endpoint[endpoint_id]; in ipa_filter_config()
544 if (endpoint->ee_id == ee_id) in ipa_filter_config()
557 route_id <= IPA_ROUTE_MODEM_MIN + IPA_ROUTE_MODEM_COUNT - 1; in ipa_route_id_modem()
561 * ipa_route_tuple_zero() - Zero a hashed route table entry tuple
572 val = ioread32(ipa->reg_virt + offset); in ipa_route_tuple_zero()
574 /* Zero all route-related fields, preserving the rest */ in ipa_route_tuple_zero()
577 iowrite32(val, ipa->reg_virt + offset); in ipa_route_tuple_zero()
585 if (ipa->version == IPA_VERSION_4_2) in ipa_route_config()
615 * Initialize a coherent DMA allocation containing initialized filter and
622 * entries are 64 bits wide, and (other than the bitmap) contain the DMA
627 * Each entry in a route table is the DMA address of a routing rule. For
628 * routing there is also a 64-bit "zero rule" that means no routing, and
633 * aligned on a 128 byte boundary. The coherent DMA buffer we allocate here
635 * allocated space. In ipa_table_init() we verify the minimum DMA allocation
638 *      +-------------------+
639 *  --> |     zero rule     |
640 * /    |-------------------|
642 * |\   |-------------------|
643 * | ---- zero rule address | \
644 * |\   |-------------------| |
645 * | ---- zero rule address | |  IPA_FILTER_COUNT_MAX
646 * |    |-------------------| >  or IPA_ROUTE_COUNT_MAX,
648 *  \   |-------------------| |
649 *   ---- zero rule address | /
650 *      +-------------------+
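The fragments that follow are pieces of ipa_table_init() and ipa_table_exit(). For orientation, here is a condensed sketch of what the init fragments add up to; the size formula and the max_t() choice of count are assumptions consistent with the diagram above, not quoted code:

/* Sketch only: allocate the coherent buffer drawn above and fill it
 * with the zero rule, the filter bitmap slot, and count copies of the
 * zero rule's DMA address.
 */
static int example_table_init(struct ipa *ipa)
{
	u32 count = max_t(u32, IPA_FILTER_COUNT_MAX, IPA_ROUTE_COUNT_MAX);
	struct device *dev = &ipa->pdev->dev;
	dma_addr_t addr;
	__le64 le_addr;
	__le64 *virt;
	size_t size;

	/* zero rule, then the bitmap slot, then count address slots */
	size = IPA_ZERO_RULE_SIZE + (1 + count) * sizeof(__le64);
	virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;
	ipa->table_virt = virt;
	ipa->table_addr = addr;

	*virt++ = 0;					/* the zero rule itself */
	*virt++ = cpu_to_le64((u64)ipa->filter_map << 1); /* filter bitmap slot */

	le_addr = cpu_to_le64(addr);	/* DMA address of the zero rule */
	while (count--)
		*virt++ = le_addr;	/* every entry points at the zero rule */

	return 0;
}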
655 struct device *dev = &ipa->pdev->dev; in ipa_table_init()
664 * aligned on a 128-byte boundary. We put the "zero rule" at the in ipa_table_init()
665 * base of the table area allocated here. The DMA address returned in ipa_table_init()
666 * by dma_alloc_coherent() is guaranteed to be a power-of-2 number in ipa_table_init()
672 return -ENOMEM; in ipa_table_init()
674 ipa->table_virt = virt; in ipa_table_init()
675 ipa->table_addr = addr; in ipa_table_init()
685 *virt++ = cpu_to_le64((u64)ipa->filter_map << 1); in ipa_table_init()
687 /* All the rest contain the DMA address of the zero rule */ in ipa_table_init()
689 while (count--) in ipa_table_init()
698 struct device *dev = &ipa->pdev->dev; in ipa_table_exit()
703 dma_free_coherent(dev, size, ipa->table_virt, ipa->table_addr); in ipa_table_exit()
704 ipa->table_addr = 0; in ipa_table_exit()
705 ipa->table_virt = NULL; in ipa_table_exit()