// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	X.25 Packet Layer release 002
 *
 *	This is ALPHA test software. This code may break your machine, randomly fail to work with new
 *	releases, misbehave and/or generally screw up. It might even work.
 *
 *	This code REQUIRES 2.1.15 or higher
 *
 *	History
 *	X.25 001	Jonathan Naylor	Started coding.
 *	2000-09-04	Henner Eisen	Prevent freeing a dangling skb.
 */
14
#define pr_fmt(fmt) "X25: " fmt

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/if_arp.h>
#include <net/x25.h>
#include <net/x25device.h>
25
x25_receive_data(struct sk_buff * skb,struct x25_neigh * nb)26 static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb)
27 {
28 struct sock *sk;
29 unsigned short frametype;
30 unsigned int lci;
31
32 if (!pskb_may_pull(skb, X25_STD_MIN_LEN))
33 return 0;
34
35 frametype = skb->data[2];
36 lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF);
37
38 /*
39 * LCI of zero is always for us, and its always a link control
40 * frame.
41 */
42 if (lci == 0) {
43 x25_link_control(skb, nb, frametype);
44 return 0;
45 }
46
47 /*
48 * Find an existing socket.
49 */
50 if ((sk = x25_find_socket(lci, nb)) != NULL) {
51 int queued = 1;
52
53 skb_reset_transport_header(skb);
54 bh_lock_sock(sk);
55 if (!sock_owned_by_user(sk)) {
56 queued = x25_process_rx_frame(sk, skb);
57 } else {
58 queued = !sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf));
59 }
60 bh_unlock_sock(sk);
61 sock_put(sk);
62 return queued;
63 }
64
65 /*
66 * Is is a Call Request ? if so process it.
67 */
68 if (frametype == X25_CALL_REQUEST)
69 return x25_rx_call_request(skb, nb, lci);
70
71 /*
72 * Its not a Call Request, nor is it a control frame.
73 * Can we forward it?
74 */
75
76 if (x25_forward_data(lci, nb, skb)) {
77 if (frametype == X25_CLEAR_CONFIRMATION) {
78 x25_clear_forward_by_lci(lci);
79 }
80 kfree_skb(skb);
81 return 1;
82 }
83
84 /*
85 x25_transmit_clear_request(nb, lci, 0x0D);
86 */
87
88 if (frametype != X25_CLEAR_CONFIRMATION)
89 pr_debug("x25_receive_data(): unknown frame type %2x\n",frametype);
90
91 return 0;
92 }
93
x25_lapb_receive_frame(struct sk_buff * skb,struct net_device * dev,struct packet_type * ptype,struct net_device * orig_dev)94 int x25_lapb_receive_frame(struct sk_buff *skb, struct net_device *dev,
95 struct packet_type *ptype, struct net_device *orig_dev)
96 {
97 struct sk_buff *nskb;
98 struct x25_neigh *nb;
99
100 if (!net_eq(dev_net(dev), &init_net))
101 goto drop;
102
103 nskb = skb_copy(skb, GFP_ATOMIC);
104 if (!nskb)
105 goto drop;
106 kfree_skb(skb);
107 skb = nskb;
108
109 /*
110 * Packet received from unrecognised device, throw it away.
111 */
112 nb = x25_get_neigh(dev);
113 if (!nb) {
114 pr_debug("unknown neighbour - %s\n", dev->name);
115 goto drop;
116 }
117
118 if (!pskb_may_pull(skb, 1))
119 return 0;
120
121 switch (skb->data[0]) {
122
123 case X25_IFACE_DATA:
124 skb_pull(skb, 1);
125 if (x25_receive_data(skb, nb)) {
126 x25_neigh_put(nb);
127 goto out;
128 }
129 break;
130
131 case X25_IFACE_CONNECT:
132 x25_link_established(nb);
133 break;
134
135 case X25_IFACE_DISCONNECT:
136 x25_link_terminated(nb);
137 break;
138 }
139 x25_neigh_put(nb);
140 drop:
141 kfree_skb(skb);
142 out:
143 return 0;
144 }
145
x25_establish_link(struct x25_neigh * nb)146 void x25_establish_link(struct x25_neigh *nb)
147 {
148 struct sk_buff *skb;
149 unsigned char *ptr;
150
151 switch (nb->dev->type) {
152 case ARPHRD_X25:
153 if ((skb = alloc_skb(1, GFP_ATOMIC)) == NULL) {
154 pr_err("x25_dev: out of memory\n");
155 return;
156 }
157 ptr = skb_put(skb, 1);
158 *ptr = X25_IFACE_CONNECT;
159 break;
160
161 #if IS_ENABLED(CONFIG_LLC)
162 case ARPHRD_ETHER:
163 return;
164 #endif
165 default:
166 return;
167 }
168
169 skb->protocol = htons(ETH_P_X25);
170 skb->dev = nb->dev;
171
172 dev_queue_xmit(skb);
173 }
174
x25_terminate_link(struct x25_neigh * nb)175 void x25_terminate_link(struct x25_neigh *nb)
176 {
177 struct sk_buff *skb;
178 unsigned char *ptr;
179
180 #if IS_ENABLED(CONFIG_LLC)
181 if (nb->dev->type == ARPHRD_ETHER)
182 return;
183 #endif
184 if (nb->dev->type != ARPHRD_X25)
185 return;
186
187 skb = alloc_skb(1, GFP_ATOMIC);
188 if (!skb) {
189 pr_err("x25_dev: out of memory\n");
190 return;
191 }
192
193 ptr = skb_put(skb, 1);
194 *ptr = X25_IFACE_DISCONNECT;
195
196 skb->protocol = htons(ETH_P_X25);
197 skb->dev = nb->dev;
198 dev_queue_xmit(skb);
199 }
200
x25_send_frame(struct sk_buff * skb,struct x25_neigh * nb)201 void x25_send_frame(struct sk_buff *skb, struct x25_neigh *nb)
202 {
203 unsigned char *dptr;
204
205 skb_reset_network_header(skb);
206
207 switch (nb->dev->type) {
208 case ARPHRD_X25:
209 dptr = skb_push(skb, 1);
210 *dptr = X25_IFACE_DATA;
211 break;
212
213 #if IS_ENABLED(CONFIG_LLC)
214 case ARPHRD_ETHER:
215 kfree_skb(skb);
216 return;
217 #endif
218 default:
219 kfree_skb(skb);
220 return;
221 }
222
223 skb->protocol = htons(ETH_P_X25);
224 skb->dev = nb->dev;
225
226 dev_queue_xmit(skb);
227 }
228