@@ -240,6 +240,8 @@ struct spdk_nvmf_tcp_req {
 	 * h2c_offset is used when we receive the h2c_data PDU.
 	 */
 	uint32_t				h2c_offset;
+	uint64_t				time;	/* tick stamp set when the request enters TCP_REQUEST_STATE_NEW; 0 when not tracked */
+	bool					logged;	/* true once the slow-request notice has been emitted */
 
 	STAILQ_ENTRY(spdk_nvmf_tcp_req)		link;
 	TAILQ_ENTRY(spdk_nvmf_tcp_req)		state_link;
@@ -2859,6 +2861,17 @@ nvmf_tcp_check_fused_ordering(struct spdk_nvmf_tcp_transport *ttransport,
 	}
 }
 
+/* Log once when a request has been outstanding for more than two seconds. */
+static void
+check_time(struct spdk_nvmf_tcp_req *tcp_req)
+{
+	if (!tcp_req->logged && tcp_req->time &&
+	    spdk_get_ticks() - tcp_req->time > spdk_get_ticks_hz() * 2) {
+		SPDK_NOTICELOG("tcp_req ttag %u outstanding for more than 2 seconds\n", tcp_req->ttag);
+		tcp_req->logged = true;
+	}
+}
+
 static bool
 nvmf_tcp_req_process(struct spdk_nvmf_tcp_transport *ttransport,
 		     struct spdk_nvmf_tcp_req *tcp_req)
@@ -2899,6 +2912,8 @@ nvmf_tcp_req_process(struct spdk_nvmf_tcp_transport *ttransport,
 	case TCP_REQUEST_STATE_NEW:
 		spdk_trace_record(TRACE_TCP_REQUEST_STATE_NEW, tqpair->qpair.trace_id, 0, (uintptr_t)tcp_req,
 				  tqpair->qpair.queue_depth);
+		tcp_req->time = spdk_get_ticks();
+		tcp_req->logged = false;
 
 		/* copy the cmd from the receive pdu */
 		tcp_req->cmd = tqpair->pdu_in_progress->hdr.capsule_cmd.ccsqe;
@@ -2947,6 +2962,7 @@ nvmf_tcp_req_process(struct spdk_nvmf_tcp_transport *ttransport,
 	case TCP_REQUEST_STATE_NEED_BUFFER:
 		spdk_trace_record(TRACE_TCP_REQUEST_STATE_NEED_BUFFER, tqpair->qpair.trace_id, 0,
 				  (uintptr_t)tcp_req);
+		check_time(tcp_req);
 
 		assert(tcp_req->req.xfer != SPDK_NVME_DATA_NONE);
 
@@ -3013,10 +3029,12 @@ nvmf_tcp_req_process(struct spdk_nvmf_tcp_transport *ttransport,
 				  (uintptr_t)tcp_req);
 		/* Some external code must kick a request into TCP_REQUEST_STATE_ZCOPY_START_COMPLETED
 		 * to escape this state. */
+		check_time(tcp_req);
 		break;
 	case TCP_REQUEST_STATE_ZCOPY_START_COMPLETED:
 		spdk_trace_record(TRACE_TCP_REQUEST_STATE_ZCOPY_START_COMPLETED, tqpair->qpair.trace_id, 0,
 				  (uintptr_t)tcp_req);
+		check_time(tcp_req);
 		if (spdk_unlikely(spdk_nvme_cpl_is_error(&tcp_req->req.rsp->nvme_cpl))) {
 			SPDK_DEBUGLOG(nvmf_tcp, "Zero-copy start failed for tcp_req(%p) on tqpair=%p\n",
 				      tcp_req, tqpair);
@@ -3033,6 +3051,7 @@ nvmf_tcp_req_process(struct spdk_nvmf_tcp_transport *ttransport,
 	case TCP_REQUEST_STATE_AWAITING_R2T_ACK:
 		spdk_trace_record(TRACE_TCP_REQUEST_STATE_AWAIT_R2T_ACK, tqpair->qpair.trace_id, 0,
 				  (uintptr_t)tcp_req);
+		check_time(tcp_req);
 		/* The R2T completion or the h2c data incoming will kick it out of this state. */
 		break;
 	case TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER:
@@ -3041,10 +3060,12 @@ nvmf_tcp_req_process(struct spdk_nvmf_tcp_transport *ttransport,
 				    0, (uintptr_t)tcp_req);
 		/* Some external code must kick a request into TCP_REQUEST_STATE_READY_TO_EXECUTE
 		 * to escape this state. */
+		check_time(tcp_req);
 		break;
 	case TCP_REQUEST_STATE_READY_TO_EXECUTE:
 		spdk_trace_record(TRACE_TCP_REQUEST_STATE_READY_TO_EXECUTE, tqpair->qpair.trace_id, 0,
 				  (uintptr_t)tcp_req);
+		check_time(tcp_req);
 
 		if (spdk_unlikely(tcp_req->req.dif_enabled)) {
 			assert(tcp_req->req.dif.elba_length >= tcp_req->req.length);
@@ -3112,25 +3133,29 @@ nvmf_tcp_req_process(struct spdk_nvmf_tcp_transport *ttransport,
 		spdk_trace_record(TRACE_TCP_REQUEST_STATE_EXECUTING, tqpair->qpair.trace_id, 0, (uintptr_t)tcp_req);
 		/* Some external code must kick a request into TCP_REQUEST_STATE_EXECUTED
 		 * to escape this state. */
+		check_time(tcp_req);
 		break;
 	case TCP_REQUEST_STATE_AWAITING_ZCOPY_COMMIT:
 		spdk_trace_record(TRACE_TCP_REQUEST_STATE_AWAIT_ZCOPY_COMMIT, tqpair->qpair.trace_id, 0,
 				  (uintptr_t)tcp_req);
 		/* Some external code must kick a request into TCP_REQUEST_STATE_EXECUTED
 		 * to escape this state. */
+		check_time(tcp_req);
 		break;
 	case TCP_REQUEST_STATE_EXECUTED:
 		spdk_trace_record(TRACE_TCP_REQUEST_STATE_EXECUTED, tqpair->qpair.trace_id, 0, (uintptr_t)tcp_req);
 
 		if (spdk_unlikely(tcp_req->req.dif_enabled)) {
 			tcp_req->req.length = tcp_req->req.dif.orig_length;
 		}
-
+		check_time(tcp_req);
 		nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_READY_TO_COMPLETE);
 		break;
 	case TCP_REQUEST_STATE_READY_TO_COMPLETE:
 		spdk_trace_record(TRACE_TCP_REQUEST_STATE_READY_TO_COMPLETE, tqpair->qpair.trace_id, 0,
 				  (uintptr_t)tcp_req);
+		check_time(tcp_req);
+		tcp_req->time = 0;
 		if (request_transfer_out(&tcp_req->req) != 0) {
 			assert(0); /* No good way to handle this currently */
 		}
@@ -3140,19 +3165,22 @@ nvmf_tcp_req_process(struct spdk_nvmf_tcp_transport *ttransport,
 				    0, (uintptr_t)tcp_req);
 		/* Some external code must kick a request into TCP_REQUEST_STATE_COMPLETED
 		 * to escape this state. */
+		check_time(tcp_req);
 		break;
 	case TCP_REQUEST_STATE_AWAITING_ZCOPY_RELEASE:
 		spdk_trace_record(TRACE_TCP_REQUEST_STATE_AWAIT_ZCOPY_RELEASE, tqpair->qpair.trace_id, 0,
 				  (uintptr_t)tcp_req);
 		/* Some external code must kick a request into TCP_REQUEST_STATE_COMPLETED
 		 * to escape this state. */
+		check_time(tcp_req);
 		break;
 	case TCP_REQUEST_STATE_COMPLETED:
 		spdk_trace_record(TRACE_TCP_REQUEST_STATE_COMPLETED, tqpair->qpair.trace_id, 0, (uintptr_t)tcp_req,
 				  tqpair->qpair.queue_depth);
 		/* If there's an outstanding PDU sent to the host, the request is completed
 		 * due to the qpair being disconnected. We must delay the completion until
 		 * that write is done to avoid freeing the request twice. */
+		check_time(tcp_req);
 		if (spdk_unlikely(tcp_req->pdu_in_use)) {
 			SPDK_DEBUGLOG(nvmf_tcp, "Delaying completion due to outstanding "
 				      "write on req=%p\n", tcp_req);
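
Note: the mechanism here is simple. The patch stamps `spdk_get_ticks()` on a request as it enters TCP_REQUEST_STATE_NEW, then, in every state a request can linger in, compares the elapsed ticks against `spdk_get_ticks_hz() * 2` and logs at most once per request; clearing `time` at READY_TO_COMPLETE stops tracking once the response is on its way out. Below is a minimal standalone sketch of the same pattern, assuming nothing from SPDK: `demo_req`, `get_ticks()`, and `get_ticks_hz()` are illustrative stand-ins, with a monotonic nanosecond clock playing the role of SPDK's TSC-based tick counter.

```c
#define _POSIX_C_SOURCE 200112L

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

/* Stand-in for struct spdk_nvmf_tcp_req: just the two fields this patch adds,
 * plus the ttag used in the log line. */
struct demo_req {
	uint64_t time;		/* tick stamp taken when the request is created */
	bool logged;		/* set once the warning has fired */
	uint16_t ttag;
};

/* Monotonic nanosecond clock standing in for spdk_get_ticks(). */
static uint64_t get_ticks(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
}

/* Tick rate standing in for spdk_get_ticks_hz(): our "ticks" are nanoseconds. */
static uint64_t get_ticks_hz(void)
{
	return 1000000000ULL;
}

/* Same shape as the patch's check_time(): warn at most once per request
 * after it has been outstanding for more than two seconds. */
static void check_time(struct demo_req *req)
{
	if (!req->logged && req->time &&
	    get_ticks() - req->time > get_ticks_hz() * 2) {
		printf("req ttag %u outstanding for more than 2 seconds\n", req->ttag);
		req->logged = true;
	}
}

int main(void)
{
	struct demo_req req = { .time = get_ticks(), .logged = false, .ttag = 7 };

	check_time(&req);	/* quiet: under the two-second threshold */
	sleep(3);		/* simulate a request stuck in one state */
	check_time(&req);	/* prints the notice once */
	check_time(&req);	/* quiet: logged is already set */
	return 0;
}
```

The integer tick comparison keeps the per-poll cost to one counter read and one compare in the state-machine hot path, and the `logged` flag bounds the log volume to one notice per tracked request.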