net: convert rps_needed and rfs_needed to new static branch api
author		Eric Dumazet <edumazet@google.com>
		Fri, 22 Mar 2019 15:56:38 +0000 (08:56 -0700)
committer	David S. Miller <davem@davemloft.net>
		Sun, 24 Mar 2019 01:57:38 +0000 (21:57 -0400)
We prefer static_branch_unlikely() over static_key_false() these days: the
bare struct static_key with static_key_false()/static_key_slow_inc() is the
deprecated jump-label API, while struct static_key_false plus the
static_branch_*() helpers encodes the key's default (disabled) state in its
type and decouples the branch hint from the key's initial value.
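
For reference, a minimal sketch of the usage pattern this conversion moves
to; the key name "example_needed" and the surrounding functions are
hypothetical, for illustration only:

	#include <linux/cache.h>
	#include <linux/jump_label.h>
	#include <linux/types.h>

	/* A zero-initialized key starts disabled, matching this patch's
	 * direct declaration style; DEFINE_STATIC_KEY_FALSE() is the
	 * more common way to declare one.
	 */
	struct static_key_false example_needed __read_mostly;

	static unsigned long example_hits;

	static void example_rx_path(void)
	{
		/* Compiles to straight-line code while the key is
		 * disabled; enabling the key patches in a jump to
		 * the slow path at runtime.
		 */
		if (static_branch_unlikely(&example_needed))
			example_hits++;
	}

	static void example_toggle(bool on)
	{
		/* Reference-counted replacements for the old
		 * static_key_slow_inc()/static_key_slow_dec().
		 */
		if (on)
			static_branch_inc(&example_needed);
		else
			static_branch_dec(&example_needed);
	}

Unlike the old API, the static_branch_*() macros also reject a plain
struct static_key argument at compile time.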

Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
Acked-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/tun.c
include/linux/netdevice.h
include/net/sock.h
net/core/dev.c
net/core/net-sysfs.c
net/core/sysctl_net_core.c

diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 27798aa..24d0220 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1042,7 +1042,7 @@ static int tun_net_close(struct net_device *dev)
 static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb)
 {
 #ifdef CONFIG_RPS
-       if (tun->numqueues == 1 && static_key_false(&rps_needed)) {
+       if (tun->numqueues == 1 && static_branch_unlikely(&rps_needed)) {
                /* Select queue was not called for the skbuff, so we extract the
                 * RPS hash and save it into the flow_table here.
                 */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 8237622..166fdc0 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -194,8 +194,8 @@ struct net_device_stats {
 
 #ifdef CONFIG_RPS
 #include <linux/static_key.h>
-extern struct static_key rps_needed;
-extern struct static_key rfs_needed;
+extern struct static_key_false rps_needed;
+extern struct static_key_false rfs_needed;
 #endif
 
 struct neighbour;
diff --git a/include/net/sock.h b/include/net/sock.h
index 8de5ee2..fecdf63 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -966,7 +966,7 @@ static inline void sock_rps_record_flow_hash(__u32 hash)
 static inline void sock_rps_record_flow(const struct sock *sk)
 {
 #ifdef CONFIG_RPS
-       if (static_key_false(&rfs_needed)) {
+       if (static_branch_unlikely(&rfs_needed)) {
                /* Reading sk->sk_rxhash might incur an expensive cache line
                 * miss.
                 *
diff --git a/net/core/dev.c b/net/core/dev.c
index 676c941..9ca2d3a 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3982,9 +3982,9 @@ EXPORT_SYMBOL(rps_sock_flow_table);
 u32 rps_cpu_mask __read_mostly;
 EXPORT_SYMBOL(rps_cpu_mask);
 
-struct static_key rps_needed __read_mostly;
+struct static_key_false rps_needed __read_mostly;
 EXPORT_SYMBOL(rps_needed);
-struct static_key rfs_needed __read_mostly;
+struct static_key_false rfs_needed __read_mostly;
 EXPORT_SYMBOL(rfs_needed);
 
 static struct rps_dev_flow *
@@ -4510,7 +4510,7 @@ static int netif_rx_internal(struct sk_buff *skb)
        }
 
 #ifdef CONFIG_RPS
-       if (static_key_false(&rps_needed)) {
+       if (static_branch_unlikely(&rps_needed)) {
                struct rps_dev_flow voidflow, *rflow = &voidflow;
                int cpu;
 
@@ -5179,7 +5179,7 @@ static int netif_receive_skb_internal(struct sk_buff *skb)
 
        rcu_read_lock();
 #ifdef CONFIG_RPS
-       if (static_key_false(&rps_needed)) {
+       if (static_branch_unlikely(&rps_needed)) {
                struct rps_dev_flow voidflow, *rflow = &voidflow;
                int cpu = get_rps_cpu(skb->dev, skb, &rflow);
 
@@ -5227,7 +5227,7 @@ static void netif_receive_skb_list_internal(struct list_head *head)
 
        rcu_read_lock();
 #ifdef CONFIG_RPS
-       if (static_key_false(&rps_needed)) {
+       if (static_branch_unlikely(&rps_needed)) {
                list_for_each_entry_safe(skb, next, head, list) {
                        struct rps_dev_flow voidflow, *rflow = &voidflow;
                        int cpu = get_rps_cpu(skb->dev, skb, &rflow);
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 4ff661f..851cabb 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -754,9 +754,9 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue,
        rcu_assign_pointer(queue->rps_map, map);
 
        if (map)
-               static_key_slow_inc(&rps_needed);
+               static_branch_inc(&rps_needed);
        if (old_map)
-               static_key_slow_dec(&rps_needed);
+               static_branch_dec(&rps_needed);
 
        mutex_unlock(&rps_map_mutex);
 
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 84bf286..1a26856 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -95,12 +95,12 @@ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
                if (sock_table != orig_sock_table) {
                        rcu_assign_pointer(rps_sock_flow_table, sock_table);
                        if (sock_table) {
-                               static_key_slow_inc(&rps_needed);
-                               static_key_slow_inc(&rfs_needed);
+                               static_branch_inc(&rps_needed);
+                               static_branch_inc(&rfs_needed);
                        }
                        if (orig_sock_table) {
-                               static_key_slow_dec(&rps_needed);
-                               static_key_slow_dec(&rfs_needed);
+                               static_branch_dec(&rps_needed);
+                               static_branch_dec(&rfs_needed);
                                synchronize_rcu();
                                vfree(orig_sock_table);
                        }