
Lines matching "unmapped-event-sources"

20  *      - Redistributions of source code must retain the above
24 * - Redistributions in binary form must reproduce the above
32 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
45 #include <linux/dma-mapping.h>
156 if (attr->gid_type == IB_GID_TYPE_IB) in rdma_gid_attr_network_type()
159 if (ipv6_addr_v4mapped((struct in6_addr *)&attr->gid)) in rdma_gid_attr_network_type()
189 * This device supports a per-device lkey or stag that can be
409 default: return -1; in ib_mtu_enum_to_int()
450 default: return -1; in ib_width_enum_to_int()
466 * @lock - Mutex to protect parallel write access to lifespan and values
469 * @timestamp - Used by the core code to track when the last update was
470 * @lifespan - Used by the core code to determine how old the counters
474 * @name - Array of pointers to static names used for the counters in
476 * @num_counters - How many hardware counters there are. If name is
480 * @value - Array of u64 counters that are accessed by the sysfs code and
494 * rdma_alloc_hw_stats_struct - Helper function to allocate dynamic struct
496 * @names - Array of static const char *
497 * @num_counters - How many elements in array
498 * @lifespan - How many milliseconds between updates
510 stats->names = names; in rdma_alloc_hw_stats_struct()
511 stats->num_counters = num_counters; in rdma_alloc_hw_stats_struct()
512 stats->lifespan = msecs_to_jiffies(lifespan); in rdma_alloc_hw_stats_struct()
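
The three assignments above are the whole body of the helper: it stores the caller's static name array, the counter count, and a jiffies-converted lifespan. A minimal sketch of a driver's alloc_hw_stats callback built on it (the mydrv_* names and the two counters are invented for illustration; all sketches in this listing assume #include <rdma/ib_verbs.h>):

    static const char * const mydrv_counter_names[] = {
            "rx_packets",
            "tx_packets",
    };

    static struct rdma_hw_stats *mydrv_alloc_hw_stats(struct ib_device *ibdev,
                                                      u8 port_num)
    {
            /* Refresh counters at most once every 10 ms on sysfs reads. */
            return rdma_alloc_hw_stats_struct(mydrv_counter_names,
                                              ARRAY_SIZE(mydrv_counter_names),
                                              10);
    }
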
646 const char *__attribute_const__ ib_event_msg(enum ib_event_type event);
657 enum ib_event_type event; member
668 (_ptr)->device = _device; \
669 (_ptr)->handler = _handler; \
670 INIT_LIST_HEAD(&(_ptr)->list); \
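
The INIT_IB_EVENT_HANDLER fragments above fill in the device, the callback, and the list head. A rough sketch of registering an asynchronous event handler with it (hypothetical mydrv_* names; ib_event_msg() is the stringifier declared above):

    static void mydrv_event_handler(struct ib_event_handler *handler,
                                    struct ib_event *event)
    {
            pr_info("async event: %s\n", ib_event_msg(event->event));
    }

    static struct ib_event_handler mydrv_handler;

    static void mydrv_watch_events(struct ib_device *device)
    {
            INIT_IB_EVENT_HANDLER(&mydrv_handler, device, mydrv_event_handler);
            ib_register_event_handler(&mydrv_handler);
    }
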
737 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
745 * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
753 * enum ib_mr_type - memory region type
757 * signature operations (data-integrity
761 * the normal mr constraints - see
773 * IB_SIG_TYPE_T10_DIF: Type T10-DIF
781 * Signature T10-DIF block-guard types
782 * IB_T10DIF_CRC: Corresponds to T10-PI mandated CRC checksum rules.
791 * struct ib_t10_dif_domain - Parameters specific for T10-DIF
793 * @bg_type: T10-DIF block guard type (CRC|CSUM)
816 * struct ib_sig_domain - Parameters for signature domain
829 * struct ib_sig_attrs - Parameters for signature handover operation
847 * Signature check masks (8 bytes in total) according to the T10-PI standard:
848 * -------- -------- ------------
851 * -------- -------- ------------
860 * struct ib_sig_err - signature error descriptor
875 * struct ib_mr_status - Memory region status container
888 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
1086 * indices into a 2-entry table.
1130 /* reserve bits 26-31 for low level drivers' internal use */
1325 /* reserve bits 26-31 for low level drivers' internal use */
1445 IB_ACCESS_SUPPORTED = ((IB_ACCESS_HUGETLB << 1) - 1)
1449 * XXX: these are apparently used for ->rereg_user_mr, no idea why they
1456 IB_MR_REREG_SUPPORTED = ((IB_MR_REREG_ACCESS << 1) - 1)
1475 /* Driver is being hot-unplugged. This call should delete the actual object itself */
1477 /* uobj is being cleaned-up before being committed */
1524 /* FIXME, save memory: ufile->context == context */
1841 /* default unicast and multicast rule -
1845 /* default multicast rule -
1849 /* sniffer rule - receive all port traffic */
2123 /* All user-space flags at the top: Use enum ib_uverbs_flow_action_esp_flags
2124 * This is done in order to share the same flags between user-space and
2200 /* rdma netdev type - specifies protocol type */
2207 * struct rdma_netdev - rdma netdev
2268 * spinlock and the lists_rwsem read-write semaphore */
2284 * alloc_hw_stats - Allocate a struct rdma_hw_stats and fill in the
2286 * core when the device is removed. A lifespan of -1 in the return
2292 * get_hw_stats - Fill in the counter value(s) in the stats struct.
2293 * @index - The index in the value array we wish to have updated, or
2295 * Return codes -
2296 * < 0 - Error, no counters updated
2297 * index - Updated the single counter pointed to by index
2298 * num_counters - Updated all counters (will reset the timestamp
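
The return-code contract above is easy to misread: a driver returns the index it updated, or num_counters when it refreshed everything. A hedged sketch (mydrv_read_counter() is a hypothetical hardware accessor):

    u64 mydrv_read_counter(struct ib_device *ibdev, u8 port, int i); /* hypothetical */

    static int mydrv_get_hw_stats(struct ib_device *ibdev,
                                  struct rdma_hw_stats *stats,
                                  u8 port_num, int index)
    {
            int i;

            /* Refresh every counter, not just @index. */
            for (i = 0; i < stats->num_counters; i++)
                    stats->value[i] = mydrv_read_counter(ibdev, port_num, i);

            /* All counters updated: lets the core reset its timestamp. */
            return stats->num_counters;
    }
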
2324 * link layer is either IB or iWarp. It is a no-op if @port_num port
2331 * of device of port at gid index available at @attr. Meta-info of
2535 * Driver implementing alloc_rdma_netdev must return -EOPNOTSUPP if it
2645 return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0; in ib_copy_from_udata()
2650 return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0; in ib_copy_to_udata()
2675 return ib_is_buffer_cleared(udata->inbuf + offset, len); in ib_is_udata_cleared()
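
ib_copy_from_udata()/ib_copy_to_udata() wrap copy_from_user()/copy_to_user() over the udata buffers and return -EFAULT on failure. A minimal sketch of the usual command-in/response-out pattern in a verbs handler (the cmd/resp structs are hypothetical):

    struct mydrv_create_cq_cmd  { __u64 buf_addr; };  /* hypothetical ABI */
    struct mydrv_create_cq_resp { __u32 cqe_size; };

    static int mydrv_handle_udata(struct ib_udata *udata)
    {
            struct mydrv_create_cq_cmd cmd;
            struct mydrv_create_cq_resp resp = { .cqe_size = 64 };
            int err;

            err = ib_copy_from_udata(&cmd, udata, sizeof(cmd));
            if (err)
                    return err;     /* -EFAULT on a bad user pointer */

            /* ... validate cmd.buf_addr, create the object ... */

            return ib_copy_to_udata(udata, &resp, sizeof(resp));
    }
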
2679 * ib_is_destroy_retryable - Check whether the uobject destruction
2685 * This function is a helper function that the IB layer and low-level drivers
2687 * retry-able.
2697 uobj->context->cleanup_retryable); in ib_is_destroy_retryable()
2701 * ib_destroy_usecnt - Called during destruction to check the usecnt
2706 * Non-zero usecnts will block destruction unless destruction was triggered by
2713 if (atomic_read(usecnt) && ib_is_destroy_retryable(-EBUSY, why, uobj)) in ib_destroy_usecnt()
2714 return -EBUSY; in ib_destroy_usecnt()
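
Putting the two helpers together: a destroy path checks the usecnt first, and only a non-retryable cleanup (e.g. hot-unplug) may proceed while references remain. A loose sketch for a PD uobject (modelled on, not copied from, the uverbs cleanup callbacks):

    static int mydrv_cleanup_pd(struct ib_uobject *uobj,
                                enum rdma_remove_reason why)
    {
            struct ib_pd *pd = uobj->object;
            int ret;

            ret = ib_destroy_usecnt(&pd->usecnt, why, uobj);
            if (ret)
                    return ret;     /* -EBUSY: destruction will be retried */

            ib_dealloc_pd(pd);
            return 0;
    }
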
2719 * ib_modify_qp_is_ok - Check that the supplied attribute mask
2728 * This function is a helper function that a low-level driver's
2740 void ib_dispatch_event(struct ib_event *event);
2749 * rdma_cap_ib_switch - Check if the device is IB switch
2759 return device->is_switch; in rdma_cap_ib_switch()
2763 * rdma_start_port - Return the first valid port number for the device
2776 * rdma_end_port - Return the last valid port number for the device
2785 return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt; in rdma_end_port()
2798 return device->port_immutable[port_num].core_cap_flags & in rdma_is_grh_required()
2804 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IB; in rdma_protocol_ib()
2809 return device->port_immutable[port_num].core_cap_flags & in rdma_protocol_roce()
2815 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP; in rdma_protocol_roce_udp_encap()
2820 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE; in rdma_protocol_roce_eth_encap()
2825 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP; in rdma_protocol_iwarp()
2836 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_RAW_PACKET; in rdma_protocol_raw_packet()
2841 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_USNIC; in rdma_protocol_usnic()
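
rdma_start_port()/rdma_end_port() bound the valid port numbers (switches expose the single management port 0), and the rdma_protocol_*() predicates identify the transport. A small sketch combining them:

    static void mydrv_scan_ports(struct ib_device *device)
    {
            u8 port;

            for (port = rdma_start_port(device);
                 port <= rdma_end_port(device); port++) {
                    if (rdma_protocol_ib(device, port))
                            pr_info("port %u: native InfiniBand\n", port);
                    else if (rdma_protocol_roce(device, port))
                            pr_info("port %u: RoCE\n", port);
                    else if (rdma_protocol_iwarp(device, port))
                            pr_info("port %u: iWarp\n", port);
            }
    }
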
2845 * rdma_cap_ib_mad - Check if the port of a device supports Infiniband
2858 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_MAD; in rdma_cap_ib_mad()
2862 * rdma_cap_opa_mad - Check if the port of device provides support for OPA
2882 return (device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_OPA_MAD) in rdma_cap_opa_mad()
2887 * rdma_cap_ib_smi - Check if the port of a device provides an Infiniband
2908 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SMI; in rdma_cap_ib_smi()
2912 * rdma_cap_ib_cm - Check if the port of device has the capability Infiniband
2917 * The InfiniBand Communication Manager is one of many pre-defined General
2928 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_CM; in rdma_cap_ib_cm()
2932 * rdma_cap_iw_cm - Check if the port of device has the capability IWARP
2945 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IW_CM; in rdma_cap_iw_cm()
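
The two CM predicates above are mutually exclusive in practice (the IB CM serves IB and RoCE ports, the iWarp CM serves iWarp ports), so a caller that only needs "some" connection manager can simply OR them:

    static bool mydrv_port_has_cm(struct ib_device *device, u8 port_num)
    {
            return rdma_cap_ib_cm(device, port_num) ||
                   rdma_cap_iw_cm(device, port_num);
    }
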
2949 * rdma_cap_ib_sa - Check if the port of device has the capability Infiniband
2954 * An InfiniBand Subnet Administration (SA) service is a pre-defined General
2965 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SA; in rdma_cap_ib_sa()
2969 * rdma_cap_ib_mcast - Check if the port of device has the capability Infiniband
2991 * rdma_cap_af_ib - Check if the port of device has the capability
3005 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_AF_IB; in rdma_cap_af_ib()
3009 * rdma_cap_eth_ah - Check if the port of device has the capability
3026 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_ETH_AH; in rdma_cap_eth_ah()
3030 * rdma_cap_opa_ah - Check if the port of device supports
3040 return (device->port_immutable[port_num].core_cap_flags & in rdma_cap_opa_ah()
3045 * rdma_max_mad_size - Return the max MAD size required by this RDMA Port.
3058 return device->port_immutable[port_num].max_mad_size; in rdma_max_mad_size()
3062 * rdma_cap_roce_gid_table - Check if the port of device uses roce_gid_table
3078 device->add_gid && device->del_gid; in rdma_cap_roce_gid_table()
3122 * the rkey for it into pd->unsafe_global_rkey. This can be used by
3139 * rdma_create_ah - Creates an address handle for the given address vector.
3149 * rdma_create_user_ah - Creates an address handle for the given address vector.
3164 * ib_get_gids_from_rdma_hdr - Get sgid and dgid from GRH or IPv4 header
3176 * ib_get_rdma_header_version - Get the header version
3182 * ib_init_ah_attr_from_wc - Initializes address handle attributes from a
3204 * ib_create_ah_from_wc - Creates an address handle associated with the
3219 * rdma_modify_ah - Modifies the address vector associated with an address
3228 * rdma_query_ah - Queries the address vector associated with an address
3237 * rdma_destroy_ah - Destroys an address handle.
3243 * ib_create_srq - Creates a SRQ associated with the specified protection
3250 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
3259 * ib_modify_srq - Modifies the attributes for the specified SRQ.
3263 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
3275 * ib_query_srq - Returns the attribute list and current values for the
3284 * ib_destroy_srq - Destroys the specified SRQ.
3290 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
3302 return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr ? : &dummy); in ib_post_srq_recv()
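
ib_post_srq_recv() hands the WR chain straight to the driver, as the one-line body above shows. A minimal sketch posting one receive buffer (the buffer is assumed to be DMA-mapped already and covered by @lkey):

    static int mydrv_post_one_srq_recv(struct ib_srq *srq, u64 dma_addr,
                                       u32 len, u32 lkey)
    {
            struct ib_sge sge = {
                    .addr   = dma_addr,
                    .length = len,
                    .lkey   = lkey,
            };
            struct ib_recv_wr wr = {
                    .wr_id   = dma_addr,    /* cookie echoed in the CQE */
                    .sg_list = &sge,
                    .num_sge = 1,
            };
            struct ib_recv_wr *bad_wr;

            return ib_post_srq_recv(srq, &wr, &bad_wr);
    }
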
3306 * ib_create_qp - Creates a QP associated with the specified protection
3317 * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
3321 * @attr_mask: A bit-mask used to specify which attributes of the QP
3333 * ib_modify_qp - Modifies the attributes for the specified QP and then
3338 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
3346 * ib_query_qp - Returns the attribute list and current values for the
3350 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
3362 * ib_destroy_qp - Destroys the specified QP.
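
ib_modify_qp() drives the QP state machine; the attr mask names exactly which fields the driver may read. A sketch of the canonical RESET -> INIT transition (pkey index 0 and the access flags are illustrative choices):

    static int mydrv_qp_to_init(struct ib_qp *qp, u8 port_num)
    {
            struct ib_qp_attr attr = {
                    .qp_state        = IB_QPS_INIT,
                    .pkey_index      = 0,
                    .port_num        = port_num,
                    .qp_access_flags = IB_ACCESS_REMOTE_READ |
                                       IB_ACCESS_REMOTE_WRITE,
            };

            return ib_modify_qp(qp, &attr,
                                IB_QP_STATE | IB_QP_PKEY_INDEX |
                                IB_QP_PORT | IB_QP_ACCESS_FLAGS);
    }
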
3368 * ib_open_qp - Obtain a reference to an existing sharable QP.
3369 * @xrcd - XRC domain
3378 * ib_close_qp - Release an external reference to a QP.
3387 * ib_post_send - Posts a list of work requests to the send queue of
3405 return qp->device->post_send(qp, send_wr, bad_send_wr ? : &dummy); in ib_post_send()
3409 * ib_post_recv - Posts a list of work requests to the receive queue of
3422 return qp->device->post_recv(qp, recv_wr, bad_recv_wr ? : &dummy); in ib_post_recv()
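
ib_post_send() and ib_post_recv() follow the same shape as the SRQ variant earlier. A sketch posting one signalled SEND (buffer assumed DMA-mapped):

    static int mydrv_post_one_send(struct ib_qp *qp, u64 dma_addr,
                                   u32 len, u32 lkey)
    {
            struct ib_sge sge = {
                    .addr   = dma_addr,
                    .length = len,
                    .lkey   = lkey,
            };
            struct ib_send_wr wr = {
                    .wr_id      = dma_addr,
                    .sg_list    = &sge,
                    .num_sge    = 1,
                    .opcode     = IB_WR_SEND,
                    .send_flags = IB_SEND_SIGNALED, /* ask for a CQE */
            };
            struct ib_send_wr *bad_wr;

            return ib_post_send(qp, &wr, &bad_wr);
    }
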
3435 * ib_create_cq - Creates a CQ on the specified device.
3437 * @comp_handler: A user-specified callback that is invoked when a
3438 * completion event occurs on the CQ.
3439 * @event_handler: A user-specified callback that is invoked when an
3440 * asynchronous event not associated with a completion occurs on the CQ.
3442 * the associated completion and event handlers.
3457 * ib_resize_cq - Modifies the capacity of the CQ.
3466 * rdma_set_cq_moderation - Modifies moderation params of the CQ
3468 * @cq_count: number of CQEs that will trigger an event
3469 * @cq_period: max period of time in usec before triggering an event
3475 * ib_destroy_cq - Destroys the specified CQ.
3481 * ib_poll_cq - poll a CQ for completion(s)
3490 * non-negative and < num_entries, then the CQ was emptied.
3495 return cq->device->poll_cq(cq, num_entries, wc); in ib_poll_cq()
3499 * ib_req_notify_cq - Request completion notification on a CQ.
3500 * @cq: The CQ to generate an event for.
3503 * to request an event on the next solicited event or next work
3512 * were missed and it is safe to wait for another event. In
3515 * notification event.
3518 * make sure it is empty to avoid missing an event because of a
3523 * completion notification event.
3528 return cq->device->req_notify_cq(cq, flags); in ib_req_notify_cq()
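
The comment block above describes the classic missed-event race: a completion can arrive between the final empty poll and the re-arm. Passing IB_CQ_REPORT_MISSED_EVENTS and re-polling on a positive return closes it. A sketch of the standard drain-then-rearm loop:

    static void mydrv_drain_cq(struct ib_cq *cq)
    {
            struct ib_wc wc;

    again:
            while (ib_poll_cq(cq, 1, &wc) > 0) {
                    if (wc.status != IB_WC_SUCCESS)
                            pr_err("wr_id %llu failed: %s\n",
                                   (unsigned long long)wc.wr_id,
                                   ib_wc_status_msg(wc.status));
            }

            /* > 0 means completions slipped in after the last poll. */
            if (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
                                     IB_CQ_REPORT_MISSED_EVENTS) > 0)
                    goto again;
    }
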
3532 * ib_req_ncomp_notif - Request completion notification when there are
3534 * @cq: The CQ to generate an event for.
3536 * CQ before an event is generated.
3540 return cq->device->req_ncomp_notif ? in ib_req_ncomp_notif()
3541 cq->device->req_ncomp_notif(cq, wc_cnt) : in ib_req_ncomp_notif()
3542 -ENOSYS; in ib_req_ncomp_notif()
3546 * ib_dma_mapping_error - check a DMA addr for error
3552 return dma_mapping_error(dev->dma_device, dma_addr); in ib_dma_mapping_error()
3556 * ib_dma_map_single - Map a kernel virtual address to DMA address
3566 return dma_map_single(dev->dma_device, cpu_addr, size, direction); in ib_dma_map_single()
3570 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
3580 dma_unmap_single(dev->dma_device, addr, size, direction); in ib_dma_unmap_single()
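
Every ib_dma_map_*() result must be checked with ib_dma_mapping_error() before use. A minimal map/check/unmap round-trip sketch:

    static int mydrv_map_buf(struct ib_device *dev, void *buf, size_t len,
                             u64 *dma_addr)
    {
            *dma_addr = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
            if (ib_dma_mapping_error(dev, *dma_addr))
                    return -ENOMEM;

            /* ... post WRs using *dma_addr, then unmap: */
            ib_dma_unmap_single(dev, *dma_addr, len, DMA_TO_DEVICE);
            return 0;
    }
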
3584 * ib_dma_map_page - Map a physical page to DMA address
3597 return dma_map_page(dev->dma_device, page, offset, size, direction); in ib_dma_map_page()
3601 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
3611 dma_unmap_page(dev->dma_device, addr, size, direction); in ib_dma_unmap_page()
3615 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
3625 return dma_map_sg(dev->dma_device, sg, nents, direction); in ib_dma_map_sg()
3629 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
3639 dma_unmap_sg(dev->dma_device, sg, nents, direction); in ib_dma_unmap_sg()
3647 return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, in ib_dma_map_sg_attrs()
3656 dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs); in ib_dma_unmap_sg_attrs()
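
ib_dma_map_sg() returns the number of mapped entries, which may be smaller than @nents after IOMMU coalescing; zero means failure. The unmap call must still be given the original @nents:

    static int mydrv_map_sg(struct ib_device *dev, struct scatterlist *sg,
                            int nents)
    {
            int mapped = ib_dma_map_sg(dev, sg, nents, DMA_BIDIRECTIONAL);

            if (mapped == 0)
                    return -ENOMEM;

            /* ... use the mapping ... */
            ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
            return 0;
    }
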
3659 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
3673 * ib_sg_dma_len - Return the DMA length from a scatter/gather entry
3687 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
3698 dma_sync_single_for_cpu(dev->dma_device, addr, size, dir); in ib_dma_sync_single_for_cpu()
3702 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
3713 dma_sync_single_for_device(dev->dma_device, addr, size, dir); in ib_dma_sync_single_for_device()
3717 * ib_dma_alloc_coherent - Allocate memory and map it for DMA
3728 return dma_alloc_coherent(dev->dma_device, size, dma_handle, flag); in ib_dma_alloc_coherent()
3732 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
3742 dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle); in ib_dma_free_coherent()
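
A common use of the coherent helpers is a descriptor ring shared with the device. A tiny sketch pairing allocation and free (sleeping GFP_KERNEL context assumed):

    static void *mydrv_alloc_ring(struct ib_device *dev, size_t size,
                                  dma_addr_t *dma_handle)
    {
            return ib_dma_alloc_coherent(dev, size, dma_handle, GFP_KERNEL);
    }

    static void mydrv_free_ring(struct ib_device *dev, size_t size,
                                void *ring, dma_addr_t dma_handle)
    {
            ib_dma_free_coherent(dev, size, ring, dma_handle);
    }
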
3746 * ib_dereg_mr - Deregisters a memory region and removes it from the
3759 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
3761 * @mr - struct ib_mr pointer to be updated.
3762 * @newkey - new key to be used.
3766 mr->lkey = (mr->lkey & 0xffffff00) | newkey; in ib_update_fast_reg_key()
3767 mr->rkey = (mr->rkey & 0xffffff00) | newkey; in ib_update_fast_reg_key()
3771 * ib_inc_rkey - increments the key portion of the given rkey. Can be used
3773 * @rkey - the rkey to increment.
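
Together the two helpers above let a driver retire stale rkeys when recycling an MR: increment the 8-bit key portion, then stamp it into both lkey and rkey. A sketch:

    static u32 mydrv_refresh_mr_key(struct ib_mr *mr)
    {
            u8 newkey = ib_inc_rkey(mr->rkey) & 0xff;

            ib_update_fast_reg_key(mr, newkey);
            return mr->rkey;        /* hand the new rkey to the peer */
    }
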
3782 * ib_alloc_fmr - Allocates a unmapped fast memory region.
3783 * @pd: The protection domain associated with the unmapped region.
3785 * @fmr_attr: Attributes of the unmapped region.
3795 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
3805 return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova); in ib_map_phys_fmr()
3809 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
3815 * ib_dealloc_fmr - Deallocates a fast memory region.
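
FMRs are a legacy interface (new code uses ib_map_mr_sg()-style registration), but the lifecycle implied above is: allocate, map a page list, unmap via a list, deallocate. A hedged sketch (max_maps of 32 is an illustrative remap budget):

    static int mydrv_fmr_roundtrip(struct ib_pd *pd, u64 *page_list,
                                   int npages, u64 iova)
    {
            struct ib_fmr_attr attr = {
                    .max_pages  = npages,
                    .max_maps   = 32,
                    .page_shift = PAGE_SHIFT,
            };
            LIST_HEAD(fmr_list);
            struct ib_fmr *fmr;
            int err;

            fmr = ib_alloc_fmr(pd, IB_ACCESS_LOCAL_WRITE, &attr);
            if (IS_ERR(fmr))
                    return PTR_ERR(fmr);

            err = ib_map_phys_fmr(fmr, page_list, npages, iova);
            if (!err) {
                    list_add(&fmr->list, &fmr_list);
                    ib_unmap_fmr(&fmr_list);
            }

            return ib_dealloc_fmr(fmr);
    }
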
3821 * ib_attach_mcast - Attaches the specified QP to a multicast group.
3835 * ib_detach_mcast - Detaches the specified QP from a multicast group.
3843 * ib_alloc_xrcd - Allocates an XRC domain.
3852 * ib_dealloc_xrcd - Deallocates an XRC domain.
3865 return -EINVAL; in ib_check_mr_access()
3868 return -EINVAL; in ib_check_mr_access()
3925 mr->iova = 0; in ib_map_mr_sg_zbva()
3941 if (attr->type == RDMA_AH_ATTR_TYPE_ROCE) in rdma_ah_retrieve_dmac()
3942 return attr->roce.dmac; in rdma_ah_retrieve_dmac()
3948 if (attr->type == RDMA_AH_ATTR_TYPE_IB) in rdma_ah_set_dlid()
3949 attr->ib.dlid = (u16)dlid; in rdma_ah_set_dlid()
3950 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA) in rdma_ah_set_dlid()
3951 attr->opa.dlid = dlid; in rdma_ah_set_dlid()
3956 if (attr->type == RDMA_AH_ATTR_TYPE_IB) in rdma_ah_get_dlid()
3957 return attr->ib.dlid; in rdma_ah_get_dlid()
3958 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA) in rdma_ah_get_dlid()
3959 return attr->opa.dlid; in rdma_ah_get_dlid()
3965 attr->sl = sl; in rdma_ah_set_sl()
3970 return attr->sl; in rdma_ah_get_sl()
3976 if (attr->type == RDMA_AH_ATTR_TYPE_IB) in rdma_ah_set_path_bits()
3977 attr->ib.src_path_bits = src_path_bits; in rdma_ah_set_path_bits()
3978 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA) in rdma_ah_set_path_bits()
3979 attr->opa.src_path_bits = src_path_bits; in rdma_ah_set_path_bits()
3984 if (attr->type == RDMA_AH_ATTR_TYPE_IB) in rdma_ah_get_path_bits()
3985 return attr->ib.src_path_bits; in rdma_ah_get_path_bits()
3986 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA) in rdma_ah_get_path_bits()
3987 return attr->opa.src_path_bits; in rdma_ah_get_path_bits()
3994 if (attr->type == RDMA_AH_ATTR_TYPE_OPA) in rdma_ah_set_make_grd()
3995 attr->opa.make_grd = make_grd; in rdma_ah_set_make_grd()
4000 if (attr->type == RDMA_AH_ATTR_TYPE_OPA) in rdma_ah_get_make_grd()
4001 return attr->opa.make_grd; in rdma_ah_get_make_grd()
4007 attr->port_num = port_num; in rdma_ah_set_port_num()
4012 return attr->port_num; in rdma_ah_get_port_num()
4018 attr->static_rate = static_rate; in rdma_ah_set_static_rate()
4023 return attr->static_rate; in rdma_ah_get_static_rate()
4029 attr->ah_flags = flag; in rdma_ah_set_ah_flags()
4035 return attr->ah_flags; in rdma_ah_get_ah_flags()
4041 return &attr->grh; in rdma_ah_read_grh()
4048 return &attr->grh; in rdma_ah_retrieve_grh()
4055 memcpy(grh->dgid.raw, dgid, sizeof(grh->dgid)); in rdma_ah_set_dgid_raw()
4063 grh->dgid.global.subnet_prefix = prefix; in rdma_ah_set_subnet_prefix()
4071 grh->dgid.global.interface_id = if_id; in rdma_ah_set_interface_id()
4081 attr->ah_flags = IB_AH_GRH; in rdma_ah_set_grh()
4083 grh->dgid = *dgid; in rdma_ah_set_grh()
4084 grh->flow_label = flow_label; in rdma_ah_set_grh()
4085 grh->sgid_index = sgid_index; in rdma_ah_set_grh()
4086 grh->hop_limit = hop_limit; in rdma_ah_set_grh()
4087 grh->traffic_class = traffic_class; in rdma_ah_set_grh()
4088 grh->sgid_attr = NULL; in rdma_ah_set_grh()
4102 * rdma_ah_find_type - Return address handle type.
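
The rdma_ah_set_*() accessors above fill a struct rdma_ah_attr field by field, dispatching on attr->type; rdma_ah_find_type() picks that type from the port's protocol. A sketch that builds an attribute and creates an AH (hop limit 64, SL 0 and no flow label are illustrative values):

    static struct ib_ah *mydrv_make_ah(struct ib_pd *pd, u8 port_num,
                                       u32 dlid, union ib_gid *dgid)
    {
            struct rdma_ah_attr attr = {};

            attr.type = rdma_ah_find_type(pd->device, port_num);
            rdma_ah_set_port_num(&attr, port_num);
            rdma_ah_set_sl(&attr, 0);
            rdma_ah_set_dlid(&attr, dlid);
            if (dgid)       /* global route: sets IB_AH_GRH too */
                    rdma_ah_set_grh(&attr, dgid, 0, 0, 64, 0);

            return rdma_create_ah(pd, &attr);
    }
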
4122 * ib_lid_cpu16 - Return lid in 16bit CPU encoding.
4124 * get the 32bit lid is from other sources for OPA.
4137 * ib_lid_be16 - Return lid in 16bit BE encoding.
4148 * ib_get_vector_affinity - Get the affinity mappings of a given completion
4154 * completion vector (returns all-cpus map if the device driver doesn't
4160 if (comp_vector < 0 || comp_vector >= device->num_comp_vectors || in ib_get_vector_affinity()
4161 !device->get_vector_affinity) in ib_get_vector_affinity()
4164 return device->get_vector_affinity(device, comp_vector); in ib_get_vector_affinity()
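
As the body above shows, ib_get_vector_affinity() returns NULL when the driver exposes no affinity op or the vector is out of range, so callers must fall back gracefully. A sketch that picks a completion vector whose IRQ affinity covers a given CPU:

    static int mydrv_pick_comp_vector(struct ib_device *device, int cpu)
    {
            int v;

            for (v = 0; v < device->num_comp_vectors; v++) {
                    const struct cpumask *mask =
                            ib_get_vector_affinity(device, v);

                    if (mask && cpumask_test_cpu(cpu, mask))
                            return v;
            }

            return 0;       /* fallback: vector 0 */
    }
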
4171 uobj->object = ibflow; in ib_set_flow()
4172 ibflow->uobject = uobj; in ib_set_flow()
4175 atomic_inc(&qp->usecnt); in ib_set_flow()
4176 ibflow->qp = qp; in ib_set_flow()
4179 ibflow->device = device; in ib_set_flow()
4183 * rdma_roce_rescan_device - Rescan all of the network devices in the system