
Lines Matching +full:rpm +full:- +full:msg +full:- +full:ram (the "qcom,rpm-msg-ram" devicetree property)

 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.

 * two regions are cached and non-cached memory respectively. Each region
 * Items in the non-cached region are allocated from the start of the partition
 * is hence the region between the cached and non-cached offsets. The header of
 * be held - currently lock number 3 of the sfpb or tcsr is used for this on all

 * struct smem_proc_comm - proc_comm communication struct (legacy)
 * struct smem_global_entry - entry to reference smem items on the heap
 * struct smem_header - header found at the beginning of the primary smem region
 * struct smem_ptable_entry - one entry in the @smem_ptable list
 * struct smem_ptable - partition table for the private partitions
 * struct smem_partition_header - header of the partitions
 * struct smem_private_entry - header of each item in the private partition
 * struct smem_info - smem region info located after the table of contents
 * struct smem_region - representation of a chunk of memory used for smem
 * struct qcom_smem - device data for the smem device
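
The kernel-doc stubs above only name the structures; most of their fields can be read back from the accessors matched below. The following sketch of the two private-partition headers is reconstructed on that basis. Field order and the reserved members follow the mainline driver and should be treated as assumptions, since the full definitions are not among the matched lines.

/* Reconstructed sketch, not the verbatim definitions. All multi-byte
 * fields are little-endian in memory (hence the __le types).
 */
struct smem_partition_header {
	u8 magic[4];			/* checked against SMEM_PART_MAGIC */
	__le16 host0;			/* first of the two sharing hosts */
	__le16 host1;			/* second of the two sharing hosts */
	__le32 size;			/* byte size of the whole partition */
	__le32 offset_free_uncached;	/* uncached heap grows up to here */
	__le32 offset_free_cached;	/* cached heap grows down to here */
	__le32 reserved[3];
};

struct smem_private_entry {
	u16 canary;			/* SMEM_PRIVATE_CANARY, corruption check */
	__le16 item;			/* smem item id */
	__le32 size;			/* item size, includes padding_data */
	__le16 padding_data;		/* trailing padding counted in size */
	__le16 padding_hdr;		/* padding between header and data */
	__le32 reserved;
};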
/* in phdr_to_last_uncached_entry() */
	return p + le32_to_cpu(phdr->offset_free_uncached);

/* in phdr_to_first_cached_entry() */
	return p + le32_to_cpu(phdr->size) - ALIGN(sizeof(*e), cacheline);

/* in phdr_to_last_cached_entry() */
	return p + le32_to_cpu(phdr->offset_free_cached);

/* in uncached_entry_next() */
	return p + sizeof(*e) + le16_to_cpu(e->padding_hdr) +
	       le32_to_cpu(e->size);

/* in cached_entry_next() */
	return p - le32_to_cpu(e->size) - ALIGN(sizeof(*e), cacheline);

/* in uncached_entry_to_item() */
	return p + sizeof(*e) + le16_to_cpu(e->padding_hdr);

/* in cached_entry_to_item() */
	return p - le32_to_cpu(e->size);
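
Taken together these helpers describe the partition heap: uncached entries are walked forward from the header, cached entries backward from the end of the partition, and the free space is the gap in the middle. Below is a minimal sketch of the forward walk, shaped like the lookup loops in qcom_smem_get_private() further down; phdr_to_first_uncached_entry() is assumed to exist alongside the helpers above, as it is not among the matched lines.

/* Sketch: find item "item" in the uncached region of a partition. */
static void *find_uncached_item(struct smem_partition_header *phdr,
				unsigned int item)
{
	struct smem_private_entry *e = phdr_to_first_uncached_entry(phdr);
	void *end = phdr_to_last_uncached_entry(phdr);

	while ((void *)e < end) {
		if (e->canary != SMEM_PRIVATE_CANARY)
			return NULL;		/* heap corrupted; give up */
		if (le16_to_cpu(e->item) == item)
			return uncached_entry_to_item(e);
		e = uncached_entry_next(e);	/* skip header, padding, data */
	}
	return NULL;
}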
/* in qcom_smem_alloc_private() */
	if (hdr->canary != SMEM_PRIVATE_CANARY)
		...
	if (le16_to_cpu(hdr->item) == item)
		return -EEXIST;
	...
	dev_err(smem->dev, "Out of memory\n");
	return -ENOSPC;
	...
	hdr->canary = SMEM_PRIVATE_CANARY;
	hdr->item = cpu_to_le16(item);
	hdr->size = cpu_to_le32(ALIGN(size, 8));
	hdr->padding_data = cpu_to_le16(le32_to_cpu(hdr->size) - size);
	hdr->padding_hdr = 0;
	...
	le32_add_cpu(&phdr->offset_free_uncached, alloc_size);
	...
	dev_err(smem->dev, "Found invalid canary in hosts %hu:%hu partition\n",
		le16_to_cpu(phdr->host0), le16_to_cpu(phdr->host1));
	return -EINVAL;
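
The padding bookkeeping above is what lets readers recover the caller's exact size later. A worked example:

/* Allocating a 13-byte item:
 *   hdr->size         = ALIGN(13, 8) = 16
 *   hdr->padding_data = 16 - 13      = 3
 * A reader then computes 16 - 3 = 13, exactly what was requested; see
 * the "*size = le32_to_cpu(e->size) - le16_to_cpu(e->padding_data)"
 * lines in qcom_smem_get_private() further down.
 */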
/* in qcom_smem_alloc_global() */
	header = smem->regions[0].virt_base;
	entry = &header->toc[item];
	if (entry->allocated)
		return -EEXIST;
	...
	if (WARN_ON(size > le32_to_cpu(header->available)))
		return -ENOMEM;
	...
	entry->offset = header->free_offset;
	entry->size = cpu_to_le32(size);
	...
	entry->allocated = cpu_to_le32(1);
	...
	le32_add_cpu(&header->free_offset, size);
	le32_add_cpu(&header->available, -size);
 * qcom_smem_alloc() - allocate space for a smem item
 * @host: remote processor id, or -1

/* in qcom_smem_alloc() */
	return -EPROBE_DEFER;
	...
	dev_err(__smem->dev,
		...
	return -EINVAL;
	...
	if (WARN_ON(item >= __smem->item_count))
		return -EINVAL;
	...
	ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
		...
	if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
		phdr = __smem->partitions[host];
	} else if (__smem->global_partition) {
		phdr = __smem->global_partition;
	...
	hwspin_unlock_irqrestore(__smem->hwlock, &flags);
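
For callers, qcom_smem_alloc() is a one-shot reservation: the item either gets carved out of the partition or already exists. A usage sketch against the exported API follows; MY_SMEM_ITEM is a hypothetical item number, as real ids are defined by the protocols layered on top of smem.

#include <linux/soc/qcom/smem.h>

#define MY_SMEM_ITEM	499	/* hypothetical, for illustration only */

static int example_alloc(void)
{
	int ret;

	/* Reserve 64 bytes; -EEXIST just means it was allocated earlier. */
	ret = qcom_smem_alloc(QCOM_SMEM_HOST_ANY, MY_SMEM_ITEM, 64);
	if (ret < 0 && ret != -EEXIST)
		return ret;	/* e.g. -EPROBE_DEFER before smem probes */
	return 0;
}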
/* in qcom_smem_get_global() */
	header = smem->regions[0].virt_base;
	entry = &header->toc[item];
	if (!entry->allocated)
		return ERR_PTR(-ENXIO);
	...
	aux_base = le32_to_cpu(entry->aux_base) & AUX_BASE_MASK;
	...
	for (i = 0; i < smem->num_regions; i++) {
		area = &smem->regions[i];
		...
		if (area->aux_base == aux_base || !aux_base) {
			...
			*size = le32_to_cpu(entry->size);
			return area->virt_base + le32_to_cpu(entry->offset);
	...
	return ERR_PTR(-ENOENT);
/* in qcom_smem_get_private(), uncached walk */
	if (e->canary != SMEM_PRIVATE_CANARY)
		...
	if (le16_to_cpu(e->item) == item) {
		...
		*size = le32_to_cpu(e->size) -
			le16_to_cpu(e->padding_data);
	...
/* in qcom_smem_get_private(), cached walk */
	if (e->canary != SMEM_PRIVATE_CANARY)
		...
	if (le16_to_cpu(e->item) == item) {
		...
		*size = le32_to_cpu(e->size) -
			le16_to_cpu(e->padding_data);
	...
	return ERR_PTR(-ENOENT);
	...
	dev_err(smem->dev, "Found invalid canary in hosts %hu:%hu partition\n",
		le16_to_cpu(phdr->host0), le16_to_cpu(phdr->host1));
	return ERR_PTR(-EINVAL);
 * qcom_smem_get() - resolve the pointer and size of a smem item
 * @host: the remote processor, or -1

/* in qcom_smem_get() */
	void *ptr = ERR_PTR(-EPROBE_DEFER);
	...
	if (WARN_ON(item >= __smem->item_count))
		return ERR_PTR(-EINVAL);
	...
	ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
		...
	if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
		phdr = __smem->partitions[host];
		cacheln = __smem->cacheline[host];
	} else if (__smem->global_partition) {
		phdr = __smem->global_partition;
		cacheln = __smem->global_cacheline;
	...
	hwspin_unlock_irqrestore(__smem->hwlock, &flags);
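
A companion sketch to the allocation example above: resolving the same item. qcom_smem_get() returns an ERR_PTR() on failure, so the result must be checked with IS_ERR().

static int example_get(void)
{
	size_t size;
	void *ptr;

	ptr = qcom_smem_get(QCOM_SMEM_HOST_ANY, MY_SMEM_ITEM, &size);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);	/* -ENOENT/-ENXIO if never allocated */

	pr_info("smem item %u: %zu bytes at %p\n", MY_SMEM_ITEM, size, ptr);
	return 0;
}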
 * qcom_smem_get_free_space() - retrieve amount of free space in a partition
 * @host: the remote processor identifying a partition, or -1

/* in qcom_smem_get_free_space() */
	return -EPROBE_DEFER;
	...
	if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
		phdr = __smem->partitions[host];
		ret = le32_to_cpu(phdr->offset_free_cached) -
		      le32_to_cpu(phdr->offset_free_uncached);
	} else if (__smem->global_partition) {
		phdr = __smem->global_partition;
		ret = le32_to_cpu(phdr->offset_free_cached) -
		      le32_to_cpu(phdr->offset_free_uncached);
	...
	header = __smem->regions[0].virt_base;
	ret = le32_to_cpu(header->available);
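
Because the uncached heap grows up from the partition header and the cached heap grows down from its end, the free space is exactly the gap between the two free offsets, which is what the subtraction above computes. A sketch of checking headroom before a large allocation; the host id and threshold are illustrative.

static int example_check_room(void)
{
	int avail = qcom_smem_get_free_space(1);

	if (avail < 0)
		return avail;		/* e.g. -EPROBE_DEFER */
	if (avail < SZ_4K)
		return -ENOSPC;		/* not enough room for our item */
	return 0;
}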
 * qcom_smem_virt_to_phys() - return the physical address associated with an smem item pointer

/* in qcom_smem_virt_to_phys() */
	for (i = 0; i < __smem->num_regions; i++) {
		struct smem_region *region = &__smem->regions[i];

		if (p < region->virt_base)
			...
		if (p < region->virt_base + region->size) {
			u64 offset = p - region->virt_base;

			return (phys_addr_t)region->aux_base + offset;
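
The translation walks the mapped regions and rebases the pointer onto the region's bus address (aux_base). A sketch of the typical use, where ptr was returned by qcom_smem_get():

static phys_addr_t example_phys(void *ptr)
{
	/* Hand this address to a remote processor or DMA master. */
	return qcom_smem_virt_to_phys(ptr);
}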
/* in qcom_smem_get_sbl_version() */
	header = smem->regions[0].virt_base;
	versions = header->version;
/* in qcom_smem_get_ptable() */
	ptable = smem->regions[0].virt_base + smem->regions[0].size - SZ_4K;
	if (memcmp(ptable->magic, SMEM_PTABLE_MAGIC, sizeof(ptable->magic)))
		return ERR_PTR(-ENOENT);
	...
	version = le32_to_cpu(ptable->version);
	...
	dev_err(smem->dev,
		...
	return ERR_PTR(-EINVAL);

/* in qcom_smem_get_item_count() */
	info = (struct smem_info *)&ptable->entry[le32_to_cpu(ptable->num_entries)];
	if (memcmp(info->magic, SMEM_INFO_MAGIC, sizeof(info->magic)))
		...
	return le16_to_cpu(info->num_items);
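
These two lookups imply the following layout, sketched here for orientation. It is reconstructed from the pointer arithmetic above, not from a definition in the listing.

/*  regions[0].virt_base                          virt_base + size
 *  |  smem_header | item heap ...   | last SZ_4K: smem_ptable |
 *                                     ptable->entry[0..num_entries-1]
 *                                     struct smem_info  (item count)
 */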
/* in qcom_smem_set_global_partition() */
	if (smem->global_partition) {
		dev_err(smem->dev, "Already found the global partition\n");
		return -EINVAL;
	...
	for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
		entry = &ptable->entry[i];
		host0 = le16_to_cpu(entry->host0);
		host1 = le16_to_cpu(entry->host1);
	...
	dev_err(smem->dev, "Missing entry for global partition\n");
	return -EINVAL;
	...
	if (!le32_to_cpu(entry->offset) || !le32_to_cpu(entry->size)) {
		dev_err(smem->dev, "Invalid entry for global partition\n");
		return -EINVAL;
	...
	header = smem->regions[0].virt_base + le32_to_cpu(entry->offset);
	host0 = le16_to_cpu(header->host0);
	host1 = le16_to_cpu(header->host1);
	...
	if (memcmp(header->magic, SMEM_PART_MAGIC, sizeof(header->magic))) {
		dev_err(smem->dev, "Global partition has invalid magic\n");
		return -EINVAL;
	...
	dev_err(smem->dev, "Global partition hosts are invalid\n");
	return -EINVAL;
	...
	if (le32_to_cpu(header->size) != le32_to_cpu(entry->size)) {
		dev_err(smem->dev, "Global partition has invalid size\n");
		return -EINVAL;
	...
	size = le32_to_cpu(header->offset_free_uncached);
	if (size > le32_to_cpu(header->size)) {
		dev_err(smem->dev,
			...
		return -EINVAL;
	...
	smem->global_partition = header;
	smem->global_cacheline = le32_to_cpu(entry->cacheline);
/* in qcom_smem_enumerate_partitions() */
	for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
		entry = &ptable->entry[i];
		host0 = le16_to_cpu(entry->host0);
		host1 = le16_to_cpu(entry->host1);
	...
		if (!le32_to_cpu(entry->offset))
			...
		if (!le32_to_cpu(entry->size))
			...
		dev_err(smem->dev,
			...
		return -EINVAL;
	...
		if (smem->partitions[remote_host]) {
			dev_err(smem->dev,
				...
			return -EINVAL;
	...
		header = smem->regions[0].virt_base + le32_to_cpu(entry->offset);
		host0 = le16_to_cpu(header->host0);
		host1 = le16_to_cpu(header->host1);
	...
		if (memcmp(header->magic, SMEM_PART_MAGIC,
			   sizeof(header->magic))) {
			dev_err(smem->dev,
				...
			return -EINVAL;
	...
		dev_err(smem->dev,
			...
		return -EINVAL;
	...
		dev_err(smem->dev,
			...
		return -EINVAL;
	...
		if (le32_to_cpu(header->size) != le32_to_cpu(entry->size)) {
			dev_err(smem->dev,
				...
			return -EINVAL;
	...
		if (le32_to_cpu(header->offset_free_uncached) > le32_to_cpu(header->size)) {
			dev_err(smem->dev,
				...
			return -EINVAL;
	...
		smem->partitions[remote_host] = header;
		smem->cacheline[remote_host] = le32_to_cpu(entry->cacheline);
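
The loop above only records partitions that involve the local host, but the matched lines omit how remote_host is derived from each entry. A reconstructed sketch of that step, based on the mainline driver and shown here as an assumption:

		/* inside the for loop, after reading host0/host1;
		 * local_host is this processor's smem host id
		 */
		if (host0 == local_host)
			remote_host = host1;
		else if (host1 == local_host)
			remote_host = host0;
		else
			continue;	/* partition does not involve us */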
/* in qcom_smem_map_memory() */
	np = of_parse_phandle(dev->of_node, name, 0);
	...
		return -EINVAL;
	...
	smem->regions[i].aux_base = (u32)r.start;
	smem->regions[i].size = resource_size(&r);
	smem->regions[i].virt_base = devm_ioremap_wc(dev, r.start, resource_size(&r));
	if (!smem->regions[i].virt_base)
		return -ENOMEM;
/* in qcom_smem_probe() */
	if (of_find_property(pdev->dev.of_node, "qcom,rpm-msg-ram", NULL))
		...
	smem = devm_kzalloc(&pdev->dev, sizeof(*smem) + array_size, GFP_KERNEL);
	...
		return -ENOMEM;
	...
	smem->dev = &pdev->dev;
	smem->num_regions = num_regions;
	...
	ret = qcom_smem_map_memory(smem, &pdev->dev, "memory-region", 0);
	...
	if (num_regions > 1 && (ret = qcom_smem_map_memory(smem, &pdev->dev,
				"qcom,rpm-msg-ram", 1)))
		...
	header = smem->regions[0].virt_base;
	if (le32_to_cpu(header->initialized) != 1 ||
	    le32_to_cpu(header->reserved)) {
		dev_err(&pdev->dev, "SMEM is not initialized by SBL\n");
		return -EINVAL;
	...
	smem->item_count = qcom_smem_get_item_count(smem);
	...
	smem->item_count = SMEM_ITEM_COUNT;
	...
	dev_err(&pdev->dev, "Unsupported SMEM version 0x%x\n", version);
	return -EINVAL;
	...
	if (ret < 0 && ret != -ENOENT)
		...
	hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0);
	...
	if (hwlock_id != -EPROBE_DEFER)
		dev_err(&pdev->dev, "failed to retrieve hwlock\n");
	...
	smem->hwlock = hwspin_lock_request_specific(hwlock_id);
	if (!smem->hwlock)
		return -ENXIO;
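
The probe path maps the primary region from "memory-region", optionally maps the RPM message RAM when the "qcom,rpm-msg-ram" phandle is present, and takes hwlock 0 of the node. A sketch of the devicetree node shape this implies; labels are illustrative and the binding document is authoritative.

/*	smem {
 *		compatible = "qcom,smem";
 *		memory-region = <&smem_region>;
 *		qcom,rpm-msg-ram = <&rpm_msg_ram>;
 *		hwlocks = <&tcsr_mutex 3>;
 *	};
 */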
/* in qcom_smem_remove() */
	hwspin_lock_free(__smem->hwlock);
/* in the platform_driver definition */
	.name = "qcom-smem",