root/net/x25/x25_link.c

DEFINITIONS

This source file includes the following definitions.
  1. x25_start_t20timer
  2. x25_t20timer_expiry
  3. x25_stop_t20timer
  4. x25_t20timer_pending
  5. x25_link_control
  6. x25_transmit_restart_request
  7. x25_transmit_restart_confirmation
  8. x25_transmit_clear_request
  9. x25_transmit_link
  10. x25_link_established
  11. x25_link_terminated
  12. x25_link_device_up
  13. __x25_remove_neigh
  14. x25_link_device_down
  15. x25_get_neigh
  16. x25_subscr_ioctl
  17. x25_link_free

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *      X.25 Packet Layer release 002
 *
 *      This is ALPHA test software. This code may break your machine,
 *      randomly fail to work with new releases, misbehave and/or generally
 *      screw up. It might even work.
 *
 *      This code REQUIRES 2.1.15 or higher
 *
 *      History
 *      X.25 001        Jonathan Naylor   Started coding.
 *      X.25 002        Jonathan Naylor   New timer architecture.
 *      mar/20/00       Daniela Squassoni Disabling/enabling of facilities
 *                                        negotiation.
 *      2000-09-04      Henner Eisen      dev_hold() / dev_put() for x25_neigh.
 */

#define pr_fmt(fmt) "X25: " fmt

#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/uaccess.h>
#include <linux/init.h>
#include <net/x25.h>

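/*
 *      One struct x25_neigh is created per X.25-capable network device
 *      (see x25_link_device_up() below).  Entries live on x25_neigh_list,
 *      are protected by x25_neigh_list_lock and are reference counted via
 *      x25_neigh_hold()/x25_neigh_put().
 */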
LIST_HEAD(x25_neigh_list);
DEFINE_RWLOCK(x25_neigh_list_lock);

static void x25_t20timer_expiry(struct timer_list *);

static void x25_transmit_restart_confirmation(struct x25_neigh *nb);
static void x25_transmit_restart_request(struct x25_neigh *nb);

/*
 *      Linux set/reset timer routines
 */
static inline void x25_start_t20timer(struct x25_neigh *nb)
{
        mod_timer(&nb->t20timer, jiffies + nb->t20);
}

static void x25_t20timer_expiry(struct timer_list *t)
{
        struct x25_neigh *nb = from_timer(nb, t, t20timer);

        x25_transmit_restart_request(nb);

        x25_start_t20timer(nb);
}

static inline void x25_stop_t20timer(struct x25_neigh *nb)
{
        del_timer(&nb->t20timer);
}

static inline int x25_t20timer_pending(struct x25_neigh *nb)
{
        return timer_pending(&nb->t20timer);
}

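/*
 *      T20 is the Restart Request retransmission timer: it is armed when a
 *      Restart Request is sent on the link (see x25_link_established()) and
 *      rearmed on every expiry, so the request is repeated until a Restart
 *      Request or Restart Confirmation from the peer moves the link to
 *      state 3 and x25_link_control() stops it.
 */
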
/*
 *      This handles all restart and diagnostic frames.
 */
void x25_link_control(struct sk_buff *skb, struct x25_neigh *nb,
                      unsigned short frametype)
{
        struct sk_buff *skbn;
        int confirm;

        switch (frametype) {
        case X25_RESTART_REQUEST:
                confirm = !x25_t20timer_pending(nb);
                x25_stop_t20timer(nb);
                nb->state = X25_LINK_STATE_3;
                if (confirm)
                        x25_transmit_restart_confirmation(nb);
                break;

        case X25_RESTART_CONFIRMATION:
                x25_stop_t20timer(nb);
                nb->state = X25_LINK_STATE_3;
                break;

        case X25_DIAGNOSTIC:
                if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 4))
                        break;

                pr_warn("diagnostic #%d - %02X %02X %02X\n",
                        skb->data[3], skb->data[4],
                        skb->data[5], skb->data[6]);
                break;

        default:
                pr_warn("received unknown %02X with LCI 000\n",
                        frametype);
                break;
        }

        if (nb->state == X25_LINK_STATE_3)
                while ((skbn = skb_dequeue(&nb->queue)) != NULL)
                        x25_send_frame(skbn, nb);
}

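/*
 *      The transmit helpers below build control packets by hand.  The
 *      standard three byte header (X25_STD_MIN_LEN) is the GFI together
 *      with the high nibble of the LCI, then the low byte of the LCI, then
 *      the packet type identifier; Restart Request and Clear Request append
 *      two further bytes carrying the cause and diagnostic fields.
 */
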
/*
 *      This routine is called when a Restart Request is needed
 */
static void x25_transmit_restart_request(struct x25_neigh *nb)
{
        unsigned char *dptr;
        int len = X25_MAX_L2_LEN + X25_STD_MIN_LEN + 2;
        struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);

        if (!skb)
                return;

        skb_reserve(skb, X25_MAX_L2_LEN);

        dptr = skb_put(skb, X25_STD_MIN_LEN + 2);

        *dptr++ = nb->extended ? X25_GFI_EXTSEQ : X25_GFI_STDSEQ;
        *dptr++ = 0x00;
        *dptr++ = X25_RESTART_REQUEST;
        *dptr++ = 0x00;
        *dptr++ = 0;

        skb->sk = NULL;

        x25_send_frame(skb, nb);
}

/*
 *      This routine is called when a Restart Confirmation is needed
 */
static void x25_transmit_restart_confirmation(struct x25_neigh *nb)
{
        unsigned char *dptr;
        int len = X25_MAX_L2_LEN + X25_STD_MIN_LEN;
        struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);

        if (!skb)
                return;

        skb_reserve(skb, X25_MAX_L2_LEN);

        dptr = skb_put(skb, X25_STD_MIN_LEN);

        *dptr++ = nb->extended ? X25_GFI_EXTSEQ : X25_GFI_STDSEQ;
        *dptr++ = 0x00;
        *dptr++ = X25_RESTART_CONFIRMATION;

        skb->sk = NULL;

        x25_send_frame(skb, nb);
}

/*
 *      This routine is called when a Clear Request is needed outside of
 *      the context of a connected socket.
 */
void x25_transmit_clear_request(struct x25_neigh *nb, unsigned int lci,
                                unsigned char cause)
{
        unsigned char *dptr;
        int len = X25_MAX_L2_LEN + X25_STD_MIN_LEN + 2;
        struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);

        if (!skb)
                return;

        skb_reserve(skb, X25_MAX_L2_LEN);

        dptr = skb_put(skb, X25_STD_MIN_LEN + 2);

        *dptr++ = ((lci >> 8) & 0x0F) | (nb->extended ?
                                         X25_GFI_EXTSEQ :
                                         X25_GFI_STDSEQ);
        *dptr++ = (lci >> 0) & 0xFF;
        *dptr++ = X25_CLEAR_REQUEST;
        *dptr++ = cause;
        *dptr++ = 0x00;

        skb->sk = NULL;

        x25_send_frame(skb, nb);
}

void x25_transmit_link(struct sk_buff *skb, struct x25_neigh *nb)
{
        switch (nb->state) {
        case X25_LINK_STATE_0:
                skb_queue_tail(&nb->queue, skb);
                nb->state = X25_LINK_STATE_1;
                x25_establish_link(nb);
                break;
        case X25_LINK_STATE_1:
        case X25_LINK_STATE_2:
                skb_queue_tail(&nb->queue, skb);
                break;
        case X25_LINK_STATE_3:
                x25_send_frame(skb, nb);
                break;
        }
}

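/*
 *      Link state summary as used above and below: state 0 is the
 *      idle/terminated state, state 1 is waiting for the link layer to
 *      come up, state 2 is waiting for a Restart Confirmation and state 3
 *      is fully up.  Frames queued while in states 0-2 are flushed by
 *      x25_link_control() once state 3 is reached.
 */
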
/*
 *      Called when the link layer has become established.
 */
void x25_link_established(struct x25_neigh *nb)
{
        switch (nb->state) {
        case X25_LINK_STATE_0:
                nb->state = X25_LINK_STATE_2;
                break;
        case X25_LINK_STATE_1:
                x25_transmit_restart_request(nb);
                nb->state = X25_LINK_STATE_2;
                x25_start_t20timer(nb);
                break;
        }
}

/*
 *      Called when the link layer has terminated, or an establishment
 *      request has failed.
 */

void x25_link_terminated(struct x25_neigh *nb)
{
        nb->state = X25_LINK_STATE_0;
        /* Out of order: clear existing virtual calls (X.25 03/93 4.6.3) */
        x25_kill_by_neigh(nb);
}

/*
 *      Add a new device.
 */
void x25_link_device_up(struct net_device *dev)
{
        struct x25_neigh *nb = kmalloc(sizeof(*nb), GFP_ATOMIC);

        if (!nb)
                return;

        skb_queue_head_init(&nb->queue);
        timer_setup(&nb->t20timer, x25_t20timer_expiry, 0);

        dev_hold(dev);
        nb->dev      = dev;
        nb->state    = X25_LINK_STATE_0;
        nb->extended = 0;
        /*
         * Enables negotiation
         */
        nb->global_facil_mask = X25_MASK_REVERSE |
                                X25_MASK_THROUGHPUT |
                                X25_MASK_PACKET_SIZE |
                                X25_MASK_WINDOW_SIZE;
        nb->t20      = sysctl_x25_restart_request_timeout;
        refcount_set(&nb->refcnt, 1);

        write_lock_bh(&x25_neigh_list_lock);
        list_add(&nb->node, &x25_neigh_list);
        write_unlock_bh(&x25_neigh_list_lock);
}

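/*
 *      The neighbour starts with a refcount of 1 owned by x25_neigh_list
 *      and holds a reference on its device; both references are dropped
 *      again from x25_link_device_down() and x25_link_free() via
 *      __x25_remove_neigh() and dev_put().
 */
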
/**
 *      __x25_remove_neigh - remove neighbour from x25_neigh_list
 *      @nb: neigh to remove
 *
 *      Remove the neighbour from x25_neigh_list, if it is there.
 *      Caller must hold x25_neigh_list_lock.
 */
static void __x25_remove_neigh(struct x25_neigh *nb)
{
        skb_queue_purge(&nb->queue);
        x25_stop_t20timer(nb);

        if (nb->node.next) {
                list_del(&nb->node);
                x25_neigh_put(nb);
        }
}

/*
 *      A device has been removed, remove its links.
 */
void x25_link_device_down(struct net_device *dev)
{
        struct x25_neigh *nb;
        struct list_head *entry, *tmp;

        write_lock_bh(&x25_neigh_list_lock);

        list_for_each_safe(entry, tmp, &x25_neigh_list) {
                nb = list_entry(entry, struct x25_neigh, node);

                if (nb->dev == dev) {
                        __x25_remove_neigh(nb);
                        dev_put(dev);
                }
        }

        write_unlock_bh(&x25_neigh_list_lock);
}

/*
 *      Given a device, return the neighbour address.
 */
struct x25_neigh *x25_get_neigh(struct net_device *dev)
{
        struct x25_neigh *nb, *use = NULL;
        struct list_head *entry;

        read_lock_bh(&x25_neigh_list_lock);
        list_for_each(entry, &x25_neigh_list) {
                nb = list_entry(entry, struct x25_neigh, node);

                if (nb->dev == dev) {
                        use = nb;
                        break;
                }
        }

        if (use)
                x25_neigh_hold(use);
        read_unlock_bh(&x25_neigh_list_lock);
        return use;
}

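/*
 *      x25_get_neigh() returns its result with an extra reference held;
 *      the caller must drop it with x25_neigh_put() when finished, as
 *      x25_subscr_ioctl() below does.
 */
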
/*
 *      Handle the ioctls that control the subscription functions.
 */
int x25_subscr_ioctl(unsigned int cmd, void __user *arg)
{
        struct x25_subscrip_struct x25_subscr;
        struct x25_neigh *nb;
        struct net_device *dev;
        int rc = -EINVAL;

        if (cmd != SIOCX25GSUBSCRIP && cmd != SIOCX25SSUBSCRIP)
                goto out;

        rc = -EFAULT;
        if (copy_from_user(&x25_subscr, arg, sizeof(x25_subscr)))
                goto out;

        rc = -EINVAL;
        if ((dev = x25_dev_get(x25_subscr.device)) == NULL)
                goto out;

        if ((nb = x25_get_neigh(dev)) == NULL)
                goto out_dev_put;

        dev_put(dev);

        if (cmd == SIOCX25GSUBSCRIP) {
                read_lock_bh(&x25_neigh_list_lock);
                x25_subscr.extended          = nb->extended;
                x25_subscr.global_facil_mask = nb->global_facil_mask;
                read_unlock_bh(&x25_neigh_list_lock);
                rc = copy_to_user(arg, &x25_subscr,
                                  sizeof(x25_subscr)) ? -EFAULT : 0;
        } else {
                rc = -EINVAL;
                if (!(x25_subscr.extended && x25_subscr.extended != 1)) {
                        rc = 0;
                        write_lock_bh(&x25_neigh_list_lock);
                        nb->extended          = x25_subscr.extended;
                        nb->global_facil_mask = x25_subscr.global_facil_mask;
                        write_unlock_bh(&x25_neigh_list_lock);
                }
        }
        x25_neigh_put(nb);
out:
        return rc;
out_dev_put:
        dev_put(dev);
        goto out;
}

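/*
 *      Illustrative userspace sketch (not part of this file) of driving
 *      the ioctls above; "fd" is assumed to be an AF_X25 socket and
 *      "x25tap0" is a placeholder device name.  Setting extended to 1
 *      requests extended (modulo 128) sequence numbering:
 *
 *              struct x25_subscrip_struct sub = { 0 };
 *
 *              strncpy(sub.device, "x25tap0", sizeof(sub.device) - 1);
 *              if (ioctl(fd, SIOCX25GSUBSCRIP, &sub) == 0) {
 *                      sub.extended = 1;
 *                      ioctl(fd, SIOCX25SSUBSCRIP, &sub);
 *              }
 */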

/*
 *      Release all memory associated with X.25 neighbour structures.
 */
void __exit x25_link_free(void)
{
        struct x25_neigh *nb;
        struct list_head *entry, *tmp;

        write_lock_bh(&x25_neigh_list_lock);

        list_for_each_safe(entry, tmp, &x25_neigh_list) {
                struct net_device *dev;

                nb = list_entry(entry, struct x25_neigh, node);
                dev = nb->dev;
                __x25_remove_neigh(nb);
                dev_put(dev);
        }
        write_unlock_bh(&x25_neigh_list_lock);
}
