Re: [PATCH v3 net] ipv6: Fix dangling pointer when ipv6 fragment

From: Martin Lau
Date: Tue Apr 02 2019 - 11:36:03 EST


On Tue, Apr 02, 2019 at 06:49:03PM +0800, kbuild test robot wrote:
> Hi hujunwei,
>
> Thank you for the patch! Perhaps something to improve:
>
> [auto build test WARNING on net/master]
>
> url: https://github.com/0day-ci/linux/commits/hujunwei/ipv6-Fix-dangling-pointer-when-ipv6-fragment/20190402-175602
> config: i386-randconfig-x005-201913 (attached as .config)
> compiler: gcc-7 (Debian 7.3.0-1) 7.3.0
> reproduce:
> # save the attached .config to linux build tree
> make ARCH=i386
>
> All warnings (new ones prefixed by >>):
>
> net//ipv6/ip6_output.c: In function 'ip6_fragment':
> >> net//ipv6/ip6_output.c:609:27: warning: 'prevhdr' is used uninitialized in this function [-Wuninitialized]
> nexthdr_offset = prevhdr - skb_network_header(skb);
> ~~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~
>
> vim +/prevhdr +609 net//ipv6/ip6_output.c
>
> 594
> 595 int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
> 596 int (*output)(struct net *, struct sock *, struct sk_buff *))
> 597 {
> 598 struct sk_buff *frag;
> 599 struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
> 600 struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
> 601 inet6_sk(skb->sk) : NULL;
> 602 struct ipv6hdr *tmp_hdr;
> 603 struct frag_hdr *fh;
> 604 unsigned int mtu, hlen, left, len, nexthdr_offset;
> 605 int hroom, troom;
> 606 __be32 frag_id;
> 607 int ptr, offset = 0, err = 0;
> 608 u8 *prevhdr, nexthdr = 0;
> > 609 nexthdr_offset = prevhdr - skb_network_header(skb);
hmm... This line has been moved up since v2, so prevhdr is now read before ip6_find_1stfragopt() initializes it at line 611. :(
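Just to illustrate the ordering the warning is complaining about, a minimal sketch (not the actual v2 hunk) of computing the offset only after ip6_find_1stfragopt() has set prevhdr:

	u8 *prevhdr, nexthdr = 0;

	err = ip6_find_1stfragopt(skb, &prevhdr);
	if (err < 0)
		goto fail;
	hlen = err;
	nexthdr = *prevhdr;
	/* prevhdr is only valid once ip6_find_1stfragopt() has succeeded,
	 * so record its offset here instead of at the top of the function.
	 */
	nexthdr_offset = prevhdr - skb_network_header(skb);

With that ordering, the reload at line 650 (prevhdr = skb_network_header(skb) + nexthdr_offset) still points at the right byte even if skb_checksum_help() reallocated the header, which is presumably what the patch is after.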

> 610
> 611 err = ip6_find_1stfragopt(skb, &prevhdr);
> 612 if (err < 0)
> 613 goto fail;
> 614 hlen = err;
> 615 nexthdr = *prevhdr;
> 616
> 617 mtu = ip6_skb_dst_mtu(skb);
> 618
> 619 /* We must not fragment if the socket is set to force MTU discovery
> 620 * or if the skb is not generated by a local socket.
> 621 */
> 622 if (unlikely(!skb->ignore_df && skb->len > mtu))
> 623 goto fail_toobig;
> 624
> 625 if (IP6CB(skb)->frag_max_size) {
> 626 if (IP6CB(skb)->frag_max_size > mtu)
> 627 goto fail_toobig;
> 628
> 629 /* don't send fragments larger than what we received */
> 630 mtu = IP6CB(skb)->frag_max_size;
> 631 if (mtu < IPV6_MIN_MTU)
> 632 mtu = IPV6_MIN_MTU;
> 633 }
> 634
> 635 if (np && np->frag_size < mtu) {
> 636 if (np->frag_size)
> 637 mtu = np->frag_size;
> 638 }
> 639 if (mtu < hlen + sizeof(struct frag_hdr) + 8)
> 640 goto fail_toobig;
> 641 mtu -= hlen + sizeof(struct frag_hdr);
> 642
> 643 frag_id = ipv6_select_ident(net, &ipv6_hdr(skb)->daddr,
> 644 &ipv6_hdr(skb)->saddr);
> 645
> 646 if (skb->ip_summed == CHECKSUM_PARTIAL &&
> 647 (err = skb_checksum_help(skb)))
> 648 goto fail;
> 649
> 650 prevhdr = skb_network_header(skb) + nexthdr_offset;
> 651 hroom = LL_RESERVED_SPACE(rt->dst.dev);
> 652 if (skb_has_frag_list(skb)) {
> 653 unsigned int first_len = skb_pagelen(skb);
> 654 struct sk_buff *frag2;
> 655
> 656 if (first_len - hlen > mtu ||
> 657 ((first_len - hlen) & 7) ||
> 658 skb_cloned(skb) ||
> 659 skb_headroom(skb) < (hroom + sizeof(struct frag_hdr)))
> 660 goto slow_path;
> 661
> 662 skb_walk_frags(skb, frag) {
> 663 /* Correct geometry. */
> 664 if (frag->len > mtu ||
> 665 ((frag->len & 7) && frag->next) ||
> 666 skb_headroom(frag) < (hlen + hroom + sizeof(struct frag_hdr)))
> 667 goto slow_path_clean;
> 668
> 669 /* Partially cloned skb? */
> 670 if (skb_shared(frag))
> 671 goto slow_path_clean;
> 672
> 673 BUG_ON(frag->sk);
> 674 if (skb->sk) {
> 675 frag->sk = skb->sk;
> 676 frag->destructor = sock_wfree;
> 677 }
> 678 skb->truesize -= frag->truesize;
> 679 }
> 680
> 681 err = 0;
> 682 offset = 0;
> 683 /* BUILD HEADER */
> 684
> 685 *prevhdr = NEXTHDR_FRAGMENT;
> 686 tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
> 687 if (!tmp_hdr) {
> 688 err = -ENOMEM;
> 689 goto fail;
> 690 }
> 691 frag = skb_shinfo(skb)->frag_list;
> 692 skb_frag_list_init(skb);
> 693
> 694 __skb_pull(skb, hlen);
> 695 fh = __skb_push(skb, sizeof(struct frag_hdr));
> 696 __skb_push(skb, hlen);
> 697 skb_reset_network_header(skb);
> 698 memcpy(skb_network_header(skb), tmp_hdr, hlen);
> 699
> 700 fh->nexthdr = nexthdr;
> 701 fh->reserved = 0;
> 702 fh->frag_off = htons(IP6_MF);
> 703 fh->identification = frag_id;
> 704
> 705 first_len = skb_pagelen(skb);
> 706 skb->data_len = first_len - skb_headlen(skb);
> 707 skb->len = first_len;
> 708 ipv6_hdr(skb)->payload_len = htons(first_len -
> 709 sizeof(struct ipv6hdr));
> 710
> 711 for (;;) {
> 712 /* Prepare header of the next frame,
> 713 * before previous one went down. */
> 714 if (frag) {
> 715 frag->ip_summed = CHECKSUM_NONE;
> 716 skb_reset_transport_header(frag);
> 717 fh = __skb_push(frag, sizeof(struct frag_hdr));
> 718 __skb_push(frag, hlen);
> 719 skb_reset_network_header(frag);
> 720 memcpy(skb_network_header(frag), tmp_hdr,
> 721 hlen);
> 722 offset += skb->len - hlen - sizeof(struct frag_hdr);
> 723 fh->nexthdr = nexthdr;
> 724 fh->reserved = 0;
> 725 fh->frag_off = htons(offset);
> 726 if (frag->next)
> 727 fh->frag_off |= htons(IP6_MF);
> 728 fh->identification = frag_id;
> 729 ipv6_hdr(frag)->payload_len =
> 730 htons(frag->len -
> 731 sizeof(struct ipv6hdr));
> 732 ip6_copy_metadata(frag, skb);
> 733 }
> 734
> 735 err = output(net, sk, skb);
> 736 if (!err)
> 737 IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
> 738 IPSTATS_MIB_FRAGCREATES);
> 739
> 740 if (err || !frag)
> 741 break;
> 742
> 743 skb = frag;
> 744 frag = skb->next;
> 745 skb_mark_not_on_list(skb);
> 746 }
> 747
> 748 kfree(tmp_hdr);
> 749
> 750 if (err == 0) {
> 751 IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
> 752 IPSTATS_MIB_FRAGOKS);
> 753 return 0;
> 754 }
> 755
> 756 kfree_skb_list(frag);
> 757
> 758 IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
> 759 IPSTATS_MIB_FRAGFAILS);
> 760 return err;
> 761
> 762 slow_path_clean:
> 763 skb_walk_frags(skb, frag2) {
> 764 if (frag2 == frag)
> 765 break;
> 766 frag2->sk = NULL;
> 767 frag2->destructor = NULL;
> 768 skb->truesize += frag2->truesize;
> 769 }
> 770 }
> 771
> 772 slow_path:
> 773 left = skb->len - hlen; /* Space per frame */
> 774 ptr = hlen; /* Where to start from */
> 775
> 776 /*
> 777 * Fragment the datagram.
> 778 */
> 779
> 780 troom = rt->dst.dev->needed_tailroom;
> 781
> 782 /*
> 783 * Keep copying data until we run out.
> 784 */
> 785 while (left > 0) {
> 786 u8 *fragnexthdr_offset;
> 787
> 788 len = left;
> 789 /* IF: it doesn't fit, use 'mtu' - the data space left */
> 790 if (len > mtu)
> 791 len = mtu;
> 792 /* IF: we are not sending up to and including the packet end
> 793 then align the next start on an eight byte boundary */
> 794 if (len < left) {
> 795 len &= ~7;
> 796 }
> 797
> 798 /* Allocate buffer */
> 799 frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
> 800 hroom + troom, GFP_ATOMIC);
> 801 if (!frag) {
> 802 err = -ENOMEM;
> 803 goto fail;
> 804 }
> 805
> 806 /*
> 807 * Set up data on packet
> 808 */
> 809
> 810 ip6_copy_metadata(frag, skb);
> 811 skb_reserve(frag, hroom);
> 812 skb_put(frag, len + hlen + sizeof(struct frag_hdr));
> 813 skb_reset_network_header(frag);
> 814 fh = (struct frag_hdr *)(skb_network_header(frag) + hlen);
> 815 frag->transport_header = (frag->network_header + hlen +
> 816 sizeof(struct frag_hdr));
> 817
> 818 /*
> 819 * Charge the memory for the fragment to any owner
> 820 * it might possess
> 821 */
> 822 if (skb->sk)
> 823 skb_set_owner_w(frag, skb->sk);
> 824
> 825 /*
> 826 * Copy the packet header into the new buffer.
> 827 */
> 828 skb_copy_from_linear_data(skb, skb_network_header(frag), hlen);
> 829
> 830 fragnexthdr_offset = skb_network_header(frag);
> 831 fragnexthdr_offset += prevhdr - skb_network_header(skb);
> 832 *fragnexthdr_offset = NEXTHDR_FRAGMENT;
> 833
> 834 /*
> 835 * Build fragment header.
> 836 */
> 837 fh->nexthdr = nexthdr;
> 838 fh->reserved = 0;
> 839 fh->identification = frag_id;
> 840
> 841 /*
> 842 * Copy a block of the IP datagram.
> 843 */
> 844 BUG_ON(skb_copy_bits(skb, ptr, skb_transport_header(frag),
> 845 len));
> 846 left -= len;
> 847
> 848 fh->frag_off = htons(offset);
> 849 if (left > 0)
> 850 fh->frag_off |= htons(IP6_MF);
> 851 ipv6_hdr(frag)->payload_len = htons(frag->len -
> 852 sizeof(struct ipv6hdr));
> 853
> 854 ptr += len;
> 855 offset += len;
> 856
> 857 /*
> 858 * Put this fragment into the sending queue.
> 859 */
> 860 err = output(net, sk, frag);
> 861 if (err)
> 862 goto fail;
> 863
> 864 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
> 865 IPSTATS_MIB_FRAGCREATES);
> 866 }
> 867 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
> 868 IPSTATS_MIB_FRAGOKS);
> 869 consume_skb(skb);
> 870 return err;
> 871
> 872 fail_toobig:
> 873 if (skb->sk && dst_allfrag(skb_dst(skb)))
> 874 sk_nocaps_add(skb->sk, NETIF_F_GSO_MASK);
> 875
> 876 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
> 877 err = -EMSGSIZE;
> 878
> 879 fail:
> 880 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
> 881 IPSTATS_MIB_FRAGFAILS);
> 882 kfree_skb(skb);
> 883 return err;
> 884 }
> 885
>
> ---
> 0-DAY kernel test infrastructure Open Source Technology Center
> https://lists.01.org/pipermail/kbuild-all Intel Corporation