Lines Matching refs:pctx
86 static void pdp_context_delete(struct pdp_ctx *pctx);
153 static bool gtp_check_ms_ipv4(struct sk_buff *skb, struct pdp_ctx *pctx, in gtp_check_ms_ipv4() argument
164 return iph->daddr == pctx->ms_addr_ip4.s_addr; in gtp_check_ms_ipv4()
166 return iph->saddr == pctx->ms_addr_ip4.s_addr; in gtp_check_ms_ipv4()
172 static bool gtp_check_ms(struct sk_buff *skb, struct pdp_ctx *pctx, in gtp_check_ms() argument
177 return gtp_check_ms_ipv4(skb, pctx, hdrlen, role); in gtp_check_ms()
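
The two checks above (lines 164/166) compare the inner IPv4 header against the context's mobile-subscriber address, taking the destination in SGSN role and the source otherwise. Below is a minimal user-space sketch of that decision; the struct layouts, the ROLE_* constants and check_ms_ipv4() are illustrative stand-ins, not the kernel definitions.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    enum { ROLE_GGSN, ROLE_SGSN };          /* stand-ins for GTP_ROLE_* */

    struct pdp_ctx_sketch {
        uint32_t ms_addr_ip4;               /* MS address, network byte order */
    };

    struct ipv4_hdr_min {                   /* only the fields the check needs */
        uint8_t  ver_ihl, tos;
        uint16_t tot_len, id, frag_off;
        uint8_t  ttl, protocol;
        uint16_t check;
        uint32_t saddr, daddr;              /* network byte order */
    };

    /* buf points at the inner IPv4 packet, i.e. just past the GTP header */
    static bool check_ms_ipv4(const uint8_t *buf, size_t len,
                              const struct pdp_ctx_sketch *pctx, int role)
    {
        struct ipv4_hdr_min iph;

        if (len < sizeof(iph))
            return false;
        memcpy(&iph, buf, sizeof(iph));

        /* SGSN role: decapsulated traffic flows toward the MS, so the
         * destination must match; GGSN role: it comes from the MS. */
        if (role == ROLE_SGSN)
            return iph.daddr == pctx->ms_addr_ip4;
        return iph.saddr == pctx->ms_addr_ip4;
    }
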
182 static int gtp_rx(struct pdp_ctx *pctx, struct sk_buff *skb, in gtp_rx() argument
185 if (!gtp_check_ms(skb, pctx, hdrlen, role)) { in gtp_rx()
186 netdev_dbg(pctx->dev, "No PDP ctx for this MS\n"); in gtp_rx()
192 !net_eq(sock_net(pctx->sk), dev_net(pctx->dev)))) in gtp_rx()
195 netdev_dbg(pctx->dev, "forwarding packet from GGSN to uplink\n"); in gtp_rx()
203 skb->dev = pctx->dev; in gtp_rx()
205 dev_sw_netstats_rx_add(pctx->dev, skb->len); in gtp_rx()
217 struct pdp_ctx *pctx; in gtp0_udp_encap_recv() local
230 pctx = gtp0_pdp_find(gtp, be64_to_cpu(gtp0->tid)); in gtp0_udp_encap_recv()
231 if (!pctx) { in gtp0_udp_encap_recv()
236 return gtp_rx(pctx, skb, hdrlen, gtp->role); in gtp0_udp_encap_recv()
244 struct pdp_ctx *pctx; in gtp1u_udp_encap_recv() local
272 pctx = gtp1_pdp_find(gtp, ntohl(gtp1->tid)); in gtp1u_udp_encap_recv()
273 if (!pctx) { in gtp1u_udp_encap_recv()
278 return gtp_rx(pctx, skb, hdrlen, gtp->role); in gtp1u_udp_encap_recv()
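
Both encapsulation receivers resolve the context from the identifier carried in the GTP header, a 64-bit TID for GTPv0 (line 230, be64_to_cpu) and a 32-bit TEI for GTPv1-U (line 272, ntohl), and hand the packet to gtp_rx() only on a hit. The sketch below models that dispatch in user space; the header offsets come from the GTP specs, while pdp_find_v0()/pdp_find_v1(), deliver() and struct pdp_ctx_sketch are invented stand-ins.

    #include <stdint.h>
    #include <stddef.h>
    #include <string.h>
    #include <endian.h>                     /* be64toh() */
    #include <arpa/inet.h>                  /* ntohl() */

    struct pdp_ctx_sketch;                  /* opaque tunnel context */

    /* Toy stand-ins for gtp0_pdp_find()/gtp1_pdp_find()/gtp_rx() so the
     * sketch links; a real implementation consults the tid hash table. */
    static struct pdp_ctx_sketch *pdp_find_v0(uint64_t tid) { (void)tid; return NULL; }
    static struct pdp_ctx_sketch *pdp_find_v1(uint32_t tei) { (void)tei; return NULL; }
    static int deliver(struct pdp_ctx_sketch *c, const uint8_t *p, size_t n)
    { (void)c; (void)p; (void)n; return 0; }

    /* GTPv0 (TS 09.60): 20-byte header, 64-bit TID at offset 12.
     * GTPv1-U (TS 29.060): 8-byte mandatory header, 32-bit TEID at offset 4.
     * pkt points at the GTP header, i.e. at the UDP payload. */
    static int rx_v0(const uint8_t *pkt, size_t len)
    {
        uint64_t tid;
        struct pdp_ctx_sketch *pctx;

        if (len < 20)
            return -1;                      /* runt: not for us */
        memcpy(&tid, pkt + 12, sizeof(tid));
        pctx = pdp_find_v0(be64toh(tid));
        return pctx ? deliver(pctx, pkt + 20, len - 20) : -1;
    }

    static int rx_v1(const uint8_t *pkt, size_t len)
    {
        uint32_t tei;
        struct pdp_ctx_sketch *pctx;

        if (len < 8)
            return -1;
        memcpy(&tei, pkt + 4, sizeof(tei));
        pctx = pdp_find_v1(ntohl(tei));
        return pctx ? deliver(pctx, pkt + 8, len - 8) : -1;
    }
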
400 static inline void gtp0_push_header(struct sk_buff *skb, struct pdp_ctx *pctx) in gtp0_push_header() argument
410 gtp0->seq = htons((atomic_inc_return(&pctx->tx_seq) - 1) % 0xffff); in gtp0_push_header()
411 gtp0->flow = htons(pctx->u.v0.flow); in gtp0_push_header()
414 gtp0->tid = cpu_to_be64(pctx->u.v0.tid); in gtp0_push_header()
417 static inline void gtp1_push_header(struct sk_buff *skb, struct pdp_ctx *pctx) in gtp1_push_header() argument
433 gtp1->tid = htonl(pctx->u.v1.o_tei); in gtp1_push_header()
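
The GTPv0/v1 push helpers prepend the version-specific header on transmit; the GTPv0 one also stamps a per-context sequence number that wraps at 0xffff (line 410) and the 64-bit TID (line 414), while GTPv1 writes the outgoing TEI (line 433). A simplified, single-threaded sketch of the GTPv0 case, with an assumed context layout and no atomics:

    #include <stdint.h>
    #include <string.h>
    #include <arpa/inet.h>                  /* htons() */
    #include <endian.h>                     /* htobe64() */

    struct pdp_v0_sketch {
        uint32_t tx_seq;                    /* the kernel uses an atomic_t here */
        uint64_t tid;                       /* GTPv0 tunnel identifier */
        uint16_t flow;                      /* flow label */
    };

    /* Writes a 20-byte GTPv0 header into hdr[] for a payload of plen bytes.
     * Offsets follow TS 09.60; this layout is illustrative, not skb_push(). */
    static void push_gtp0_header(uint8_t hdr[20], struct pdp_v0_sketch *p,
                                 uint16_t plen)
    {
        uint16_t len  = htons(plen);
        uint16_t seq  = htons(p->tx_seq++ % 0xffff);    /* wraps at 0xffff */
        uint16_t flow = htons(p->flow);
        uint64_t tid  = htobe64(p->tid);

        hdr[0] = 0x1e;                      /* version 0, GTP non-prime */
        hdr[1] = 0xff;                      /* message type: T-PDU */
        memcpy(hdr + 2,  &len, 2);
        memcpy(hdr + 4,  &seq, 2);
        memcpy(hdr + 6,  &flow, 2);
        hdr[8] = 0xff;                      /* N-PDU number: none */
        memset(hdr + 9, 0xff, 3);           /* spare bytes */
        memcpy(hdr + 12, &tid, 8);
    }
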
445 struct pdp_ctx *pctx; member
452 switch (pktinfo->pctx->gtp_version) { in gtp_push_header()
455 gtp0_push_header(skb, pktinfo->pctx); in gtp_push_header()
459 gtp1_push_header(skb, pktinfo->pctx); in gtp_push_header()
466 struct pdp_ctx *pctx, struct rtable *rt, in gtp_set_pktinfo_ipv4() argument
472 pktinfo->pctx = pctx; in gtp_set_pktinfo_ipv4()
482 struct pdp_ctx *pctx; in gtp_build_skb_ip4() local
494 pctx = ipv4_pdp_find(gtp, iph->saddr); in gtp_build_skb_ip4()
496 pctx = ipv4_pdp_find(gtp, iph->daddr); in gtp_build_skb_ip4()
498 if (!pctx) { in gtp_build_skb_ip4()
503 netdev_dbg(dev, "found PDP context %p\n", pctx); in gtp_build_skb_ip4()
505 rt = ip4_route_output_gtp(&fl4, pctx->sk, pctx->peer_addr_ip4.s_addr); in gtp_build_skb_ip4()
508 &pctx->peer_addr_ip4.s_addr); in gtp_build_skb_ip4()
515 &pctx->peer_addr_ip4.s_addr); in gtp_build_skb_ip4()
527 switch (pctx->gtp_version) { in gtp_build_skb_ip4()
550 gtp_set_pktinfo_ipv4(pktinfo, pctx->sk, iph, pctx, rt, &fl4, dev); in gtp_build_skb_ip4()
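
In gtp_build_skb_ip4() the context is found from the inner IPv4 header rather than from a tunnel id: the source address is the key in SGSN role (line 494), the destination otherwise (line 496), and the packet is then routed toward pctx->peer_addr_ip4. A small sketch of that key selection over an assumed chained hash keyed by MS address; the struct names and the hash function are illustrative:

    #include <stddef.h>
    #include <stdint.h>

    enum { ROLE_GGSN, ROLE_SGSN };          /* stand-ins for GTP_ROLE_* */
    #define ADDR_BUCKETS 1024

    struct pdp_sketch {
        struct pdp_sketch *next;            /* bucket chain */
        uint32_t ms_addr;                   /* MS address, network byte order */
        uint32_t peer_addr;                 /* remote GSN, network byte order */
    };

    static struct pdp_sketch *addr_hash[ADDR_BUCKETS];

    static unsigned int addr_bucket(uint32_t be_addr)
    {
        return (be_addr ^ (be_addr >> 16)) % ADDR_BUCKETS;  /* toy hash */
    }

    /* saddr/daddr come straight from the inner IPv4 header (network order);
     * a NULL return corresponds to the driver dropping the packet. */
    static struct pdp_sketch *tx_lookup(uint32_t saddr, uint32_t daddr, int role)
    {
        uint32_t key = (role == ROLE_SGSN) ? saddr : daddr;
        struct pdp_sketch *p;

        for (p = addr_hash[addr_bucket(key)]; p; p = p->next)
            if (p->ms_addr == key)
                return p;                   /* next step: route to p->peer_addr */
        return NULL;
    }
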
703 struct pdp_ctx *pctx; in gtp_dellink() local
707 hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid) in gtp_dellink()
708 pdp_context_delete(pctx); in gtp_dellink()
900 static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info) in ipv4_pdp_fill() argument
902 pctx->gtp_version = nla_get_u32(info->attrs[GTPA_VERSION]); in ipv4_pdp_fill()
903 pctx->af = AF_INET; in ipv4_pdp_fill()
904 pctx->peer_addr_ip4.s_addr = in ipv4_pdp_fill()
906 pctx->ms_addr_ip4.s_addr = in ipv4_pdp_fill()
909 switch (pctx->gtp_version) { in ipv4_pdp_fill()
915 pctx->u.v0.tid = nla_get_u64(info->attrs[GTPA_TID]); in ipv4_pdp_fill()
916 pctx->u.v0.flow = nla_get_u16(info->attrs[GTPA_FLOW]); in ipv4_pdp_fill()
919 pctx->u.v1.i_tei = nla_get_u32(info->attrs[GTPA_I_TEI]); in ipv4_pdp_fill()
920 pctx->u.v1.o_tei = nla_get_u32(info->attrs[GTPA_O_TEI]); in ipv4_pdp_fill()
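
ipv4_pdp_fill() copies the generic fields (version, peer and MS address) and then the version-specific identifiers into a union: a 64-bit TID plus flow label for GTPv0, separate ingress/egress TEIs for GTPv1. A sketch of that shape with the netlink parsing already done; the request struct and all field names are assumptions:

    #include <stdint.h>

    enum gtp_version_sketch { V0 = 0, V1 = 1 };

    struct pdp_fill_req {                   /* values already parsed from GTPA_* attrs */
        enum gtp_version_sketch version;
        uint32_t peer_addr, ms_addr;        /* network byte order */
        uint64_t tid;                       /* v0 only */
        uint16_t flow;                      /* v0 only */
        uint32_t i_tei, o_tei;              /* v1 only */
    };

    struct pdp_sketch {
        enum gtp_version_sketch version;
        uint32_t peer_addr, ms_addr;
        union {
            struct { uint64_t tid; uint16_t flow; } v0;
            struct { uint32_t i_tei, o_tei; } v1;
        } u;
    };

    static void pdp_fill(struct pdp_sketch *p, const struct pdp_fill_req *req)
    {
        p->version   = req->version;
        p->peer_addr = req->peer_addr;
        p->ms_addr   = req->ms_addr;

        switch (req->version) {
        case V0:                            /* GTPv0: one 64-bit TID plus a flow label */
            p->u.v0.tid  = req->tid;
            p->u.v0.flow = req->flow;
            break;
        case V1:                            /* GTPv1-U: independent ingress/egress TEIs */
            p->u.v1.i_tei = req->i_tei;
            p->u.v1.o_tei = req->o_tei;
            break;
        }
    }
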
930 struct pdp_ctx *pctx, *pctx_tid = NULL; in gtp_pdp_add() local
941 pctx = ipv4_pdp_find(gtp, ms_addr); in gtp_pdp_add()
942 if (pctx) in gtp_pdp_add()
959 if (pctx && pctx_tid) in gtp_pdp_add()
961 if (!pctx) in gtp_pdp_add()
962 pctx = pctx_tid; in gtp_pdp_add()
964 ipv4_pdp_fill(pctx, info); in gtp_pdp_add()
966 if (pctx->gtp_version == GTP_V0) in gtp_pdp_add()
968 pctx->u.v0.tid, pctx); in gtp_pdp_add()
969 else if (pctx->gtp_version == GTP_V1) in gtp_pdp_add()
971 pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx); in gtp_pdp_add()
973 return pctx; in gtp_pdp_add()
977 pctx = kmalloc(sizeof(*pctx), GFP_ATOMIC); in gtp_pdp_add()
978 if (pctx == NULL) in gtp_pdp_add()
982 pctx->sk = sk; in gtp_pdp_add()
983 pctx->dev = gtp->dev; in gtp_pdp_add()
984 ipv4_pdp_fill(pctx, info); in gtp_pdp_add()
985 atomic_set(&pctx->tx_seq, 0); in gtp_pdp_add()
987 switch (pctx->gtp_version) { in gtp_pdp_add()
994 hash_tid = gtp0_hashfn(pctx->u.v0.tid) % gtp->hash_size; in gtp_pdp_add()
997 hash_tid = gtp1u_hashfn(pctx->u.v1.i_tei) % gtp->hash_size; in gtp_pdp_add()
1001 hlist_add_head_rcu(&pctx->hlist_addr, &gtp->addr_hash[hash_ms]); in gtp_pdp_add()
1002 hlist_add_head_rcu(&pctx->hlist_tid, &gtp->tid_hash[hash_tid]); in gtp_pdp_add()
1004 switch (pctx->gtp_version) { in gtp_pdp_add()
1007 pctx->u.v0.tid, &pctx->peer_addr_ip4, in gtp_pdp_add()
1008 &pctx->ms_addr_ip4, pctx); in gtp_pdp_add()
1012 pctx->u.v1.i_tei, pctx->u.v1.o_tei, in gtp_pdp_add()
1013 &pctx->peer_addr_ip4, &pctx->ms_addr_ip4, pctx); in gtp_pdp_add()
1017 return pctx; in gtp_pdp_add()
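
gtp_pdp_add() either updates an existing context in place or allocates a new one and links it into two hash tables at once, so the same entry is reachable by MS address on transmit and by TID/TEI on receive (lines 1001-1002). A condensed sketch of the create path, with the update/EEXIST handling and the RCU list primitives left out and all names assumed:

    #include <stdint.h>
    #include <stdlib.h>

    #define BUCKETS 1024

    struct pdp_sketch {
        struct pdp_sketch *next_addr;       /* chain in addr_hash (TX lookups) */
        struct pdp_sketch *next_tid;        /* chain in tid_hash  (RX lookups) */
        uint32_t ms_addr;                   /* key on transmit */
        uint64_t tid;                       /* key on receive: v0 TID or v1 i_tei */
    };

    static struct pdp_sketch *addr_hash[BUCKETS];
    static struct pdp_sketch *tid_hash[BUCKETS];

    static unsigned int hash32(uint32_t v) { return (v * 2654435761u) % BUCKETS; }
    static unsigned int hash64(uint64_t v) { return (unsigned int)(v % BUCKETS); }

    /* Create a context reachable from both indexes; returns NULL on ENOMEM.
     * The real gtp_pdp_add() first checks both tables and may update an
     * existing entry or refuse (EEXIST) instead of allocating. */
    static struct pdp_sketch *pdp_add(uint32_t ms_addr, uint64_t tid)
    {
        struct pdp_sketch *p = calloc(1, sizeof(*p));
        unsigned int hm, ht;

        if (!p)
            return NULL;
        p->ms_addr = ms_addr;
        p->tid = tid;

        hm = hash32(ms_addr);
        ht = hash64(tid);
        p->next_addr = addr_hash[hm];       /* head insert, like hlist_add_head_rcu() */
        addr_hash[hm] = p;
        p->next_tid = tid_hash[ht];
        tid_hash[ht] = p;
        return p;
    }
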
1022 struct pdp_ctx *pctx = container_of(head, struct pdp_ctx, rcu_head); in pdp_context_free() local
1024 sock_put(pctx->sk); in pdp_context_free()
1025 kfree(pctx); in pdp_context_free()
1028 static void pdp_context_delete(struct pdp_ctx *pctx) in pdp_context_delete() argument
1030 hlist_del_rcu(&pctx->hlist_tid); in pdp_context_delete()
1031 hlist_del_rcu(&pctx->hlist_addr); in pdp_context_delete()
1032 call_rcu(&pctx->rcu_head, pdp_context_free); in pdp_context_delete()
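
Teardown is two-phase: pdp_context_delete() unlinks the entry from both hash chains right away, but the memory and the socket reference it holds are only released by pdp_context_free() after an RCU grace period, so concurrent lookups never touch freed memory. A toy rendering of that shape; defer_until_quiescent() stands in for call_rcu() and, lacking real readers, runs the callback immediately:

    #include <stdlib.h>

    struct sock_sketch { int refcnt; };

    struct pdp_sketch {
        struct sock_sketch *sk;             /* encap socket reference held by the ctx */
        /* hash-chain linkage elided */
    };

    /* Toy stand-ins. The real driver unlinks with hlist_del_rcu() and defers
     * the free with call_rcu(); here the "grace period" is immediate, which
     * is only safe because this sketch has no concurrent readers. */
    static void unlink_from_tid_hash(struct pdp_sketch *p)  { (void)p; }
    static void unlink_from_addr_hash(struct pdp_sketch *p) { (void)p; }
    static void sock_put_sketch(struct sock_sketch *sk)     { sk->refcnt--; }
    static void defer_until_quiescent(void (*cb)(void *), void *obj) { cb(obj); }

    static void pdp_free(void *obj)
    {
        struct pdp_sketch *p = obj;

        sock_put_sketch(p->sk);             /* drop the socket ref only now */
        free(p);
    }

    static void pdp_delete(struct pdp_sketch *p)
    {
        unlink_from_tid_hash(p);            /* hide from RX lookups first... */
        unlink_from_addr_hash(p);           /* ...and from TX lookups */
        defer_until_quiescent(pdp_free, p); /* free after the grace period */
    }
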
1035 static int gtp_tunnel_notify(struct pdp_ctx *pctx, u8 cmd, gfp_t allocation);
1040 struct pdp_ctx *pctx; in gtp_genl_new_pdp() local
1089 pctx = gtp_pdp_add(gtp, sk, info); in gtp_genl_new_pdp()
1090 if (IS_ERR(pctx)) { in gtp_genl_new_pdp()
1091 err = PTR_ERR(pctx); in gtp_genl_new_pdp()
1093 gtp_tunnel_notify(pctx, GTP_CMD_NEWPDP, GFP_KERNEL); in gtp_genl_new_pdp()
1129 struct pdp_ctx *pctx; in gtp_find_pdp() local
1132 pctx = gtp_find_pdp_by_link(net, nla); in gtp_find_pdp()
1134 pctx = ERR_PTR(-EINVAL); in gtp_find_pdp()
1136 if (!pctx) in gtp_find_pdp()
1137 pctx = ERR_PTR(-ENOENT); in gtp_find_pdp()
1139 return pctx; in gtp_find_pdp()
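
gtp_find_pdp() reports failure through error pointers rather than NULL: no GTPA_LINK attribute yields ERR_PTR(-EINVAL) (line 1134), a miss yields ERR_PTR(-ENOENT) (line 1137), and the callers unpack the code with IS_ERR()/PTR_ERR(). A tiny user-space rendering of that idiom; err_ptr()/is_err()/ptr_err() and lookup_by_link() are illustrative stand-ins, the real macros live in include/linux/err.h:

    #include <stdint.h>
    #include <stdio.h>
    #include <errno.h>

    /* Encode a small negative errno in an otherwise invalid pointer value. */
    static inline void *err_ptr(long err)      { return (void *)err; }
    static inline long  ptr_err(const void *p) { return (long)(intptr_t)p; }
    static inline int is_err(const void *p)
    {
        return (uintptr_t)p >= (uintptr_t)-4095;    /* top 4095 values carry errnos */
    }

    struct pdp_sketch;                      /* opaque context */

    /* toy stand-in for gtp_find_pdp_by_link(); always misses here */
    static struct pdp_sketch *lookup_by_link(int ifindex) { (void)ifindex; return NULL; }

    static struct pdp_sketch *find_pdp(int have_link_attr, int ifindex)
    {
        struct pdp_sketch *p;

        if (!have_link_attr)
            return err_ptr(-EINVAL);        /* caller did not say which device */
        p = lookup_by_link(ifindex);
        return p ? p : err_ptr(-ENOENT);    /* device known, no such context */
    }

    static void example(void)
    {
        struct pdp_sketch *p = find_pdp(1, 5);

        if (is_err(p))
            fprintf(stderr, "lookup failed: %ld\n", ptr_err(p));
    }
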
1144 struct pdp_ctx *pctx; in gtp_genl_del_pdp() local
1152 pctx = gtp_find_pdp(sock_net(skb->sk), info->attrs); in gtp_genl_del_pdp()
1153 if (IS_ERR(pctx)) { in gtp_genl_del_pdp()
1154 err = PTR_ERR(pctx); in gtp_genl_del_pdp()
1158 if (pctx->gtp_version == GTP_V0) in gtp_genl_del_pdp()
1159 netdev_dbg(pctx->dev, "GTPv0-U: deleting tunnel id = %llx (pdp %p)\n", in gtp_genl_del_pdp()
1160 pctx->u.v0.tid, pctx); in gtp_genl_del_pdp()
1161 else if (pctx->gtp_version == GTP_V1) in gtp_genl_del_pdp()
1162 netdev_dbg(pctx->dev, "GTPv1-U: deleting tunnel id = %x/%x (pdp %p)\n", in gtp_genl_del_pdp()
1163 pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx); in gtp_genl_del_pdp()
1165 gtp_tunnel_notify(pctx, GTP_CMD_DELPDP, GFP_ATOMIC); in gtp_genl_del_pdp()
1166 pdp_context_delete(pctx); in gtp_genl_del_pdp()
1184 int flags, u32 type, struct pdp_ctx *pctx) in gtp_genl_fill_info() argument
1193 if (nla_put_u32(skb, GTPA_VERSION, pctx->gtp_version) || in gtp_genl_fill_info()
1194 nla_put_u32(skb, GTPA_LINK, pctx->dev->ifindex) || in gtp_genl_fill_info()
1195 nla_put_be32(skb, GTPA_PEER_ADDRESS, pctx->peer_addr_ip4.s_addr) || in gtp_genl_fill_info()
1196 nla_put_be32(skb, GTPA_MS_ADDRESS, pctx->ms_addr_ip4.s_addr)) in gtp_genl_fill_info()
1199 switch (pctx->gtp_version) { in gtp_genl_fill_info()
1201 if (nla_put_u64_64bit(skb, GTPA_TID, pctx->u.v0.tid, GTPA_PAD) || in gtp_genl_fill_info()
1202 nla_put_u16(skb, GTPA_FLOW, pctx->u.v0.flow)) in gtp_genl_fill_info()
1206 if (nla_put_u32(skb, GTPA_I_TEI, pctx->u.v1.i_tei) || in gtp_genl_fill_info()
1207 nla_put_u32(skb, GTPA_O_TEI, pctx->u.v1.o_tei)) in gtp_genl_fill_info()
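
gtp_genl_fill_info() emits the generic attributes (GTPA_VERSION, GTPA_LINK, GTPA_PEER_ADDRESS, GTPA_MS_ADDRESS) for every context, then the version-specific ones: GTPA_TID and GTPA_FLOW for GTPv0, GTPA_I_TEI and GTPA_O_TEI for GTPv1. The sketch below keeps only that selection logic; put_attr() is a single toy writer standing in for the nla_put_* family, and the A_* constants are stand-ins for the GTPA_* values:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    enum { A_VERSION, A_LINK, A_PEER_ADDRESS, A_MS_ADDRESS,
           A_TID, A_FLOW, A_I_TEI, A_O_TEI };       /* stand-ins for GTPA_* */

    struct msg_sketch { size_t used, cap; };        /* toy reply buffer */

    /* Toy writer: pretends every attribute costs 8 bytes and fails when the
     * buffer is full, which is how nla_put_*() signals "message too small". */
    static bool put_attr(struct msg_sketch *m, int type, uint64_t value)
    {
        (void)type; (void)value;
        if (m->used + 8 > m->cap)
            return false;
        m->used += 8;
        return true;
    }

    struct pdp_sketch {
        uint32_t version, ifindex;
        uint32_t peer_addr, ms_addr;                /* network byte order */
        union {
            struct { uint64_t tid; uint16_t flow; } v0;
            struct { uint32_t i_tei, o_tei; } v1;
        } u;
    };

    static bool fill_info(struct msg_sketch *m, const struct pdp_sketch *p)
    {
        /* generic attributes first, for every context */
        if (!put_attr(m, A_VERSION, p->version) ||
            !put_attr(m, A_LINK, p->ifindex) ||
            !put_attr(m, A_PEER_ADDRESS, p->peer_addr) ||
            !put_attr(m, A_MS_ADDRESS, p->ms_addr))
            return false;

        if (p->version == 0)                        /* GTPv0: TID + flow label */
            return put_attr(m, A_TID, p->u.v0.tid) &&
                   put_attr(m, A_FLOW, p->u.v0.flow);
        return put_attr(m, A_I_TEI, p->u.v1.i_tei) &&   /* GTPv1: two TEIs */
               put_attr(m, A_O_TEI, p->u.v1.o_tei);
    }
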
1220 static int gtp_tunnel_notify(struct pdp_ctx *pctx, u8 cmd, gfp_t allocation) in gtp_tunnel_notify() argument
1229 ret = gtp_genl_fill_info(msg, 0, 0, 0, cmd, pctx); in gtp_tunnel_notify()
1235 ret = genlmsg_multicast_netns(&gtp_genl_family, dev_net(pctx->dev), msg, in gtp_tunnel_notify()
1242 struct pdp_ctx *pctx = NULL; in gtp_genl_get_pdp() local
1251 pctx = gtp_find_pdp(sock_net(skb->sk), info->attrs); in gtp_genl_get_pdp()
1252 if (IS_ERR(pctx)) { in gtp_genl_get_pdp()
1253 err = PTR_ERR(pctx); in gtp_genl_get_pdp()
1264 0, info->nlhdr->nlmsg_type, pctx); in gtp_genl_get_pdp()
1284 struct pdp_ctx *pctx; in gtp_genl_dump_pdp() local
1301 hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], in gtp_genl_dump_pdp()
1308 cb->nlh->nlmsg_type, pctx)) { in gtp_genl_dump_pdp()
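
gtp_genl_dump_pdp() walks every tid_hash bucket and emits one record per context; since a single reply buffer can fill up mid-walk, it remembers where it stopped and resumes there on the next call (the kernel keeps that state in cb->args). A compact sketch of such a resumable walk over assumed structures, using a bucket index and a per-bucket position instead of the driver's saved pointer:

    #include <stddef.h>
    #include <stdbool.h>

    #define BUCKETS 1024

    struct pdp_sketch { struct pdp_sketch *next_tid; /* payload elided */ };

    static struct pdp_sketch *tid_hash[BUCKETS];    /* the table the add path fills */

    struct msg_sketch { size_t room; };             /* toy reply buffer */

    /* stands in for gtp_genl_fill_info(): fails when the buffer is full */
    static bool fill_one(struct msg_sketch *m, const struct pdp_sketch *p)
    {
        (void)p;
        if (m->room == 0)
            return false;
        m->room--;
        return true;
    }

    struct dump_pos { size_t bucket, skip; };       /* plays the role of cb->args[] */

    /* Emits as many contexts as fit into m; call again with the same pos to
     * continue where the previous pass stopped. Returns true when done. */
    static bool dump_pdp(struct msg_sketch *m, struct dump_pos *pos)
    {
        for (; pos->bucket < BUCKETS; pos->bucket++, pos->skip = 0) {
            const struct pdp_sketch *p = tid_hash[pos->bucket];
            size_t i = 0;

            for (; p; p = p->next_tid, i++) {
                if (i < pos->skip)
                    continue;                       /* sent in an earlier pass */
                if (!fill_one(m, p)) {
                    pos->skip = i;                  /* resume at this entry */
                    return false;
                }
            }
        }
        return true;
    }
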