@@ -763,7 +763,7 @@ static bool move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
 	if (!sock_owned_by_user(sk))
 		__mptcp_error_report(sk);
 	else
-		set_bit(MPTCP_ERROR_REPORT, &msk->flags);
+		__set_bit(MPTCP_ERROR_REPORT, &msk->cb_flags);
 }
 
 /* If the moves have caught up with the DATA_FIN sequence number
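Why the plain __set_bit() is safe here: this else branch runs with the msk socket spinlock held, and the owner consumes the flag from the release callback under that same lock, so every writer of the new cb_flags word is serialized and the atomic RMW of set_bit() buys nothing. A minimal user-space sketch of the distinction (illustrative names, not kernel API; assumes C11):

#include <stdatomic.h>

/* Illustrative contrast between set_bit() (atomic RMW, needed when
 * writers can race) and __set_bit() (plain RMW, valid only when an
 * outer lock -- here the msk socket spinlock -- serializes every
 * writer of the word).
 */
static void atomic_style_set_bit(int nr, _Atomic unsigned long *addr)
{
	atomic_fetch_or(addr, 1UL << nr);	/* lock-prefixed on x86 */
}

static void locked_style_set_bit(int nr, unsigned long *addr)
{
	*addr |= 1UL << nr;			/* caller must hold the lock */
}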
@@ -1517,9 +1517,8 @@ static void mptcp_update_post_push(struct mptcp_sock *msk,
 
 void mptcp_check_and_set_pending(struct sock *sk)
 {
-	if (mptcp_send_head(sk) &&
-	    !test_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags))
-		set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags);
+	if (mptcp_send_head(sk))
+		mptcp_sk(sk)->push_pending |= BIT(MPTCP_PUSH_PENDING);
 }
 
 void __mptcp_push_pending(struct sock *sk, unsigned int flags)
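The rewrite above also shows why the test_bit() guard could go: with an atomic set_bit(), the test avoided a needlessly locked instruction when the bit was already set, but an unconditional |= on the lock-protected push_pending word is already as cheap as the test. For reference, the kernel's BIT() helper is essentially the following (simplified; see include/linux/bits.h for the exact UL/ULL definitions):

/* Essentially what BIT() expands to (simplified) */
#define BIT(nr)	(1UL << (nr))

/* so the added line is morally:
 *	mptcp_sk(sk)->push_pending |= 1UL << MPTCP_PUSH_PENDING;
 */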
@@ -2134,7 +2133,7 @@ static void mptcp_retransmit_timer(struct timer_list *t)
 		mptcp_schedule_work(sk);
 	} else {
 		/* delegate our work to tcp_release_cb() */
-		set_bit(MPTCP_RETRANSMIT, &msk->flags);
+		__set_bit(MPTCP_RETRANSMIT, &msk->cb_flags);
 	}
 	bh_unlock_sock(sk);
 	sock_put(sk);
@@ -2840,7 +2839,9 @@ static int mptcp_disconnect(struct sock *sk, int flags)
 
 	mptcp_destroy_common(msk);
 	msk->last_snd = NULL;
-	msk->flags = 0;
+	WRITE_ONCE(msk->flags, 0);
+	msk->cb_flags = 0;
+	msk->push_pending = 0;
 	msk->recovery = false;
 	msk->can_ack = false;
 	msk->fully_established = false;
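Note the asymmetry introduced above: msk->flags remains a word with lockless readers, so zeroing it gets a WRITE_ONCE() annotation, while the new cb_flags and push_pending fields are only ever accessed under the msk socket lock and take plain stores. Roughly what WRITE_ONCE() provides (a simplified model, not the real macro, which lives in include/asm-generic/rwonce.h):

/* Simplified model of WRITE_ONCE(): a volatile store, so the compiler
 * may not tear, fuse, or elide it. It is not a memory barrier -- it
 * only keeps the store well-formed against concurrent READ_ONCE()
 * readers.
 */
#define WRITE_ONCE_MODEL(x, val) \
	(*(volatile __typeof__(x) *)&(x) = (val))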
@@ -3021,7 +3022,7 @@ void __mptcp_data_acked(struct sock *sk)
 	if (!sock_owned_by_user(sk))
 		__mptcp_clean_una(sk);
 	else
-		set_bit(MPTCP_CLEAN_UNA, &mptcp_sk(sk)->flags);
+		__set_bit(MPTCP_CLEAN_UNA, &mptcp_sk(sk)->cb_flags);
 
 	if (mptcp_pending_data_fin_ack(sk))
 		mptcp_schedule_work(sk);
@@ -3040,22 +3041,23 @@ void __mptcp_check_push(struct sock *sk, struct sock *ssk)
 		else if (xmit_ssk)
 			mptcp_subflow_delegate(mptcp_subflow_ctx(xmit_ssk), MPTCP_DELEGATE_SEND);
 	} else {
-		set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags);
+		__set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->cb_flags);
 	}
 }
 
+#define MPTCP_FLAGS_PROCESS_CTX_NEED (BIT(MPTCP_PUSH_PENDING) | \
+				      BIT(MPTCP_RETRANSMIT) | \
+				      BIT(MPTCP_FLUSH_JOIN_LIST))
+
 /* processes deferred events and flush wmem */
 static void mptcp_release_cb(struct sock *sk)
+	__must_hold(&sk->sk_lock.slock)
 {
+	struct mptcp_sock *msk = mptcp_sk(sk);
+
 	for (;;) {
-		unsigned long flags = 0;
-
-		if (test_and_clear_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags))
-			flags |= BIT(MPTCP_PUSH_PENDING);
-		if (test_and_clear_bit(MPTCP_RETRANSMIT, &mptcp_sk(sk)->flags))
-			flags |= BIT(MPTCP_RETRANSMIT);
-		if (test_and_clear_bit(MPTCP_FLUSH_JOIN_LIST, &mptcp_sk(sk)->flags))
-			flags |= BIT(MPTCP_FLUSH_JOIN_LIST);
+		unsigned long flags = (msk->cb_flags & MPTCP_FLAGS_PROCESS_CTX_NEED) |
+				      msk->push_pending;
 
 		if (!flags)
 			break;
@@ -3066,7 +3068,8 @@ static void mptcp_release_cb(struct sock *sk)
 		 *    datapath acquires the msk socket spinlock while helding
 		 *    the subflow socket lock
 		 */
-
+		msk->push_pending = 0;
+		msk->cb_flags &= ~flags;
 		spin_unlock_bh(&sk->sk_lock.slock);
 		if (flags & BIT(MPTCP_FLUSH_JOIN_LIST))
 			__mptcp_flush_join_list(sk);
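Together with the previous hunk this gives the loop its final shape: snapshot the pending-work mask while sk_lock.slock is held, clear it, drop the spinlock, run the handlers (which may take subflow socket locks, hence the ABBA note in the comment), and iterate until no work was queued in the meantime. A condensed user-space rendering of that pattern, with a pthread mutex standing in for the spinlock (a sketch only, not kernel code):

#include <pthread.h>

static unsigned long pending;	/* stands in for cb_flags | push_pending */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void handle(unsigned long work)
{
	(void)work;	/* placeholder: dispatch on the claimed bits */
}

/* The pattern: claim (snapshot + clear) the work mask under the lock,
 * process it with the lock dropped, then re-check; anything queued
 * while the lock was dropped is picked up by the next iteration.
 */
static void release_cb_shape(void)
{
	for (;;) {
		unsigned long work;

		pthread_mutex_lock(&lock);
		work = pending;
		pending = 0;	/* claimed while still locked */
		pthread_mutex_unlock(&lock);

		if (!work)
			break;
		handle(work);	/* safe to take other locks here */
	}
}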
@@ -3082,11 +3085,11 @@ static void mptcp_release_cb(struct sock *sk)
 	/* be sure to set the current sk state before tacking actions
 	 * depending on sk_state
 	 */
-	if (test_and_clear_bit(MPTCP_CONNECTED, &mptcp_sk(sk)->flags))
+	if (__test_and_clear_bit(MPTCP_CONNECTED, &msk->cb_flags))
 		__mptcp_set_connected(sk);
-	if (test_and_clear_bit(MPTCP_CLEAN_UNA, &mptcp_sk(sk)->flags))
+	if (__test_and_clear_bit(MPTCP_CLEAN_UNA, &msk->cb_flags))
 		__mptcp_clean_una_wakeup(sk);
-	if (test_and_clear_bit(MPTCP_ERROR_REPORT, &mptcp_sk(sk)->flags))
+	if (__test_and_clear_bit(MPTCP_ERROR_REPORT, &msk->cb_flags))
 		__mptcp_error_report(sk);
 
 	__mptcp_update_rmem(sk);
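The same reasoning as the earlier hunks applies here: these three flags are now only manipulated under the msk socket lock, so the atomic test_and_clear_bit() can become the non-atomic __test_and_clear_bit(), and the new msk local replaces the repeated mptcp_sk(sk) calls. A user-space analogue of the non-atomic form, under the same caller-holds-the-lock assumption:

/* User-space analogue of __test_and_clear_bit(): plain loads/stores,
 * correct only while the caller's lock excludes all other writers.
 */
static int locked_test_and_clear_bit(int nr, unsigned long *addr)
{
	unsigned long mask = 1UL << nr;
	int was_set = (*addr & mask) != 0;

	*addr &= ~mask;
	return was_set;
}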
@@ -3128,7 +3131,7 @@ void mptcp_subflow_process_delegated(struct sock *ssk)
 	if (!sock_owned_by_user(sk))
 		__mptcp_subflow_push_pending(sk, ssk);
 	else
-		set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags);
+		__set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->cb_flags);
 	mptcp_data_unlock(sk);
 	mptcp_subflow_delegated_done(subflow, MPTCP_DELEGATE_SEND);
 }
@@ -3247,7 +3250,7 @@ bool mptcp_finish_join(struct sock *ssk)
 	} else {
 		sock_hold(ssk);
 		list_add_tail(&subflow->node, &msk->join_list);
-		set_bit(MPTCP_FLUSH_JOIN_LIST, &msk->flags);
+		__set_bit(MPTCP_FLUSH_JOIN_LIST, &msk->cb_flags);
 	}
 	mptcp_data_unlock(parent);
 