@@ -511,7 +511,7 @@ static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
 	return skb;
 }
 
-static int xsk_generic_xmit(struct sock *sk)
+static int __xsk_generic_xmit(struct sock *sk)
 {
 	struct xdp_sock *xs = xdp_sk(sk);
 	u32 max_batch = TX_BATCH_SIZE;
@@ -594,22 +594,13 @@ static int xsk_generic_xmit(struct sock *sk)
 	return err;
 }
 
-static int xsk_xmit(struct sock *sk)
+static int xsk_generic_xmit(struct sock *sk)
 {
-	struct xdp_sock *xs = xdp_sk(sk);
 	int ret;
 
-	if (unlikely(!(xs->dev->flags & IFF_UP)))
-		return -ENETDOWN;
-	if (unlikely(!xs->tx))
-		return -ENOBUFS;
-
-	if (xs->zc)
-		return xsk_wakeup(xs, XDP_WAKEUP_TX);
-
 	/* Drop the RCU lock since the SKB path might sleep. */
 	rcu_read_unlock();
-	ret = xsk_generic_xmit(sk);
+	ret = __xsk_generic_xmit(sk);
 	/* Reaquire RCU lock before going into common code. */
 	rcu_read_lock();
 
@@ -627,17 +618,31 @@ static bool xsk_no_wakeup(struct sock *sk)
 #endif
 }
 
+static int xsk_check_common(struct xdp_sock *xs)
+{
+	if (unlikely(!xsk_is_bound(xs)))
+		return -ENXIO;
+	if (unlikely(!(xs->dev->flags & IFF_UP)))
+		return -ENETDOWN;
+
+	return 0;
+}
+
 static int __xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
 {
 	bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
 	struct sock *sk = sock->sk;
 	struct xdp_sock *xs = xdp_sk(sk);
 	struct xsk_buff_pool *pool;
+	int err;
 
-	if (unlikely(!xsk_is_bound(xs)))
-		return -ENXIO;
+	err = xsk_check_common(xs);
+	if (err)
+		return err;
 	if (unlikely(need_wait))
 		return -EOPNOTSUPP;
+	if (unlikely(!xs->tx))
+		return -ENOBUFS;
 
 	if (sk_can_busy_loop(sk)) {
 		if (xs->zc)
@@ -649,8 +654,11 @@ static int __xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len
 		return 0;
 
 	pool = xs->pool;
-	if (pool->cached_need_wakeup & XDP_WAKEUP_TX)
-		return xsk_xmit(sk);
+	if (pool->cached_need_wakeup & XDP_WAKEUP_TX) {
+		if (xs->zc)
+			return xsk_wakeup(xs, XDP_WAKEUP_TX);
+		return xsk_generic_xmit(sk);
+	}
 	return 0;
 }
 
@@ -670,11 +678,11 @@ static int __xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int
 	bool need_wait = !(flags & MSG_DONTWAIT);
 	struct sock *sk = sock->sk;
 	struct xdp_sock *xs = xdp_sk(sk);
+	int err;
 
-	if (unlikely(!xsk_is_bound(xs)))
-		return -ENXIO;
-	if (unlikely(!(xs->dev->flags & IFF_UP)))
-		return -ENETDOWN;
+	err = xsk_check_common(xs);
+	if (err)
+		return err;
 	if (unlikely(!xs->rx))
 		return -ENOBUFS;
 	if (unlikely(need_wait))
@@ -713,21 +721,20 @@ static __poll_t xsk_poll(struct file *file, struct socket *sock,
 	sock_poll_wait(file, sock, wait);
 
 	rcu_read_lock();
-	if (unlikely(!xsk_is_bound(xs))) {
-		rcu_read_unlock();
-		return mask;
-	}
+	if (xsk_check_common(xs))
+		goto skip_tx;
 
 	pool = xs->pool;
 
 	if (pool->cached_need_wakeup) {
 		if (xs->zc)
 			xsk_wakeup(xs, pool->cached_need_wakeup);
-		else
+		else if (xs->tx)
 			/* Poll needs to drive Tx also in copy mode */
-			xsk_xmit(sk);
+			xsk_generic_xmit(sk);
 	}
 
+skip_tx:
 	if (xs->rx && !xskq_prod_is_empty(xs->rx))
 		mask |= EPOLLIN | EPOLLRDNORM;
 	if (xs->tx && xsk_tx_writeable(xs))
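
For context on the paths this diff reshuffles: both the renamed xsk_generic_xmit() (copy mode) and the xsk_wakeup() branch (zero-copy) are normally driven from userspace by an empty sendto() on the AF_XDP socket once descriptors have been queued on the Tx ring. The sketch below is illustrative only and not part of the patch; kick_tx() and xsk_fd are assumed names, with xsk_fd taken to be an already bound AF_XDP socket.

/*
 * Userspace sketch (assumed helper, not from this patch): kick Tx on an
 * AF_XDP socket. With MSG_DONTWAIT set, need_wait stays false in
 * __xsk_sendmsg(), so the call either wakes the driver (zero-copy) or
 * runs the skb-based xsk_generic_xmit() path (copy mode).
 */
#include <errno.h>
#include <stdio.h>
#include <sys/socket.h>

static int kick_tx(int xsk_fd)
{
	/* No payload: the frames to send already sit in the Tx ring. */
	if (sendto(xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, 0) < 0 &&
	    errno != EAGAIN && errno != EBUSY &&
	    errno != ENOBUFS && errno != ENETDOWN) {
		perror("sendto");
		return -1;
	}
	return 0;
}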