@@ -103,30 +103,24 @@ static void stats_read(struct flow_stats *stats,
 
 void ovs_flow_stats_get(struct sw_flow *flow, struct ovs_flow_stats *ovs_stats,
 			unsigned long *used, __be16 *tcp_flags)
 {
-	int cpu, cur_cpu;
+	int cpu;
 
 	*used = 0;
 	*tcp_flags = 0;
 	memset(ovs_stats, 0, sizeof(*ovs_stats));
 
+	local_bh_disable();
 	if (!flow->stats.is_percpu) {
 		stats_read(flow->stats.stat, ovs_stats, used, tcp_flags);
 	} else {
-		cur_cpu = get_cpu();
 		for_each_possible_cpu(cpu) {
 			struct flow_stats *stats;
 
-			if (cpu == cur_cpu)
-				local_bh_disable();
-
 			stats = per_cpu_ptr(flow->stats.cpu_stats, cpu);
 			stats_read(stats, ovs_stats, used, tcp_flags);
-
-			if (cpu == cur_cpu)
-				local_bh_enable();
 		}
-		put_cpu();
 	}
+	local_bh_enable();
 }
 
 static void stats_reset(struct flow_stats *stats)
@@ -141,25 +135,17 @@ static void stats_reset(struct flow_stats *stats)
 
 void ovs_flow_stats_clear(struct sw_flow *flow)
 {
-	int cpu, cur_cpu;
+	int cpu;
 
+	local_bh_disable();
 	if (!flow->stats.is_percpu) {
 		stats_reset(flow->stats.stat);
 	} else {
-		cur_cpu = get_cpu();
-
 		for_each_possible_cpu(cpu) {
-
-			if (cpu == cur_cpu)
-				local_bh_disable();
-
 			stats_reset(per_cpu_ptr(flow->stats.cpu_stats, cpu));
-
-			if (cpu == cur_cpu)
-				local_bh_enable();
 		}
-		put_cpu();
 	}
+	local_bh_enable();
 }
 
 static int check_header(struct sk_buff *skb, int len)
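
For reference, a sketch of ovs_flow_stats_get() as it reads after this patch, assembled from the context and added lines of the first hunk (not copied verbatim from the tree). A single local_bh_disable()/local_bh_enable() pair now brackets both the shared-stat path and the per-CPU loop, replacing the get_cpu()/put_cpu() pinning with bottom halves toggled only for the local CPU; ovs_flow_stats_clear() takes the same shape.

/* Sketch reconstructed from the first hunk above. */
void ovs_flow_stats_get(struct sw_flow *flow, struct ovs_flow_stats *ovs_stats,
			unsigned long *used, __be16 *tcp_flags)
{
	int cpu;

	*used = 0;
	*tcp_flags = 0;
	memset(ovs_stats, 0, sizeof(*ovs_stats));

	local_bh_disable();	/* one pair covers both branches */
	if (!flow->stats.is_percpu) {
		stats_read(flow->stats.stat, ovs_stats, used, tcp_flags);
	} else {
		for_each_possible_cpu(cpu) {
			struct flow_stats *stats;

			stats = per_cpu_ptr(flow->stats.cpu_stats, cpu);
			stats_read(stats, ovs_stats, used, tcp_flags);
		}
	}
	local_bh_enable();
}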