Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Completely removed ompi_request_lock and ompi_request_cond #2448

Merged
merged 2 commits into from
Jan 4, 2017
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 0 additions & 4 deletions ompi/debuggers/ompi_mpihandles_dll.c
Original file line number Diff line number Diff line change
Expand Up @@ -84,10 +84,6 @@ struct mpidbg_name_map_t *mpidbg_status_name_map = NULL;
* cases like the above with compilers that require the symbol (like
* Sun Studio) we add in these definitions here.
*/
size_t ompi_request_completed;
opal_condition_t ompi_request_cond;
size_t ompi_request_waiting;
opal_mutex_t ompi_request_lock;
opal_mutex_t opal_event_lock;
int opal_progress_spin_count;
bool opal_mutex_check_locks;
Expand Down
4 changes: 0 additions & 4 deletions ompi/debuggers/ompi_msgq_dll.c
Original file line number Diff line number Diff line change
Expand Up @@ -1059,10 +1059,6 @@ static void dump_request( mqs_taddr_t current_item, mqs_pending_operation *res )
printf( "+===============================================+\n\n" );
}

/**
* TODO: ompi_request_completed can be used to detect any changes in the request handles.
*/

/**
* Handle the send queue as well as the receive queue. The unexpected queue
* is a whole different story ...
Expand Down
5 changes: 2 additions & 3 deletions ompi/mca/bcol/iboffload/bcol_iboffload_component.c
Original file line number Diff line number Diff line change
Expand Up @@ -931,9 +931,8 @@ static int progress_one_device(mca_bcol_iboffload_device_t *device)

if (coll_request->n_frag_mpi_complete ==
coll_request->n_fragments) {
coll_request->super.req_complete = true;
opal_condition_broadcast(&ompi_request_cond);
IBOFFLOAD_VERBOSE(10, ("After opal_condition_broadcast.\n"));
OPAL_ATOMIC_SWAP_PTR(&coll_request->super.req_complete, REQUEST_COMPLETED);
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This has lost the cond bcast semantic. Probably it needs an update_sync instead.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think the original code already had an issue. The request is marked as completed and the waiting thread is informed. As the request is an MPI request, it will eventually be freed by the calling thread. However, a few lines below, the coll_request is passed to handle_collfrag_done, which also puts the request in an internal freelist, without checking the status of the user-level request.

This component has no owner and is not maintained. I would propose we scrap it out completely.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I agree - let's suggest it at the next telecon and see if there are any objections

IBOFFLOAD_VERBOSE(10, ("After request completion.\n"));
}

rc = handle_collfrag_done(coll_frag, coll_request, device);
Expand Down
2 changes: 0 additions & 2 deletions ompi/mca/coll/libnbc/coll_libnbc_component.c
Original file line number Diff line number Diff line change
Expand Up @@ -283,9 +283,7 @@ ompi_coll_libnbc_progress(void)
else {
request->super.req_status.MPI_ERROR = res;
}
OPAL_THREAD_LOCK(&ompi_request_lock);
ompi_request_complete(&request->super, true);
OPAL_THREAD_UNLOCK(&ompi_request_lock);
}
OPAL_THREAD_LOCK(&mca_coll_libnbc_component.lock);
}
Expand Down
3 changes: 0 additions & 3 deletions ompi/mca/coll/portals4/coll_portals4_allreduce.c
Original file line number Diff line number Diff line change
Expand Up @@ -417,10 +417,7 @@ int
ompi_coll_portals4_iallreduce_intra_fini(struct ompi_coll_portals4_request_t *request)
{
allreduce_kary_tree_bottom(request);

OPAL_THREAD_LOCK(&ompi_request_lock);
ompi_request_complete(&request->super, true);
OPAL_THREAD_UNLOCK(&ompi_request_lock);

return (OMPI_SUCCESS);
}
2 changes: 0 additions & 2 deletions ompi/mca/coll/portals4/coll_portals4_barrier.c
Original file line number Diff line number Diff line change
Expand Up @@ -311,9 +311,7 @@ ompi_coll_portals4_ibarrier_intra_fini(ompi_coll_portals4_request_t *request)
return OMPI_ERROR;
}

OPAL_THREAD_LOCK(&ompi_request_lock);
ompi_request_complete(&request->super, true);
OPAL_THREAD_UNLOCK(&ompi_request_lock);

return OMPI_SUCCESS;
}
2 changes: 0 additions & 2 deletions ompi/mca/coll/portals4/coll_portals4_bcast.c
Original file line number Diff line number Diff line change
Expand Up @@ -941,9 +941,7 @@ ompi_coll_portals4_ibcast_intra_fini(ompi_coll_portals4_request_t *request)

post_bcast_data(request);

OPAL_THREAD_LOCK(&ompi_request_lock);
ompi_request_complete(&request->super, true);
OPAL_THREAD_UNLOCK(&ompi_request_lock);

opal_output_verbose(10, ompi_coll_base_framework.framework_output, "ibcast_intra_fini");
return (OMPI_SUCCESS);
Expand Down
4 changes: 0 additions & 4 deletions ompi/mca/coll/portals4/coll_portals4_gather.c
Original file line number Diff line number Diff line change
Expand Up @@ -1132,9 +1132,7 @@ ompi_coll_portals4_gather_intra_binomial_bottom(struct ompi_communicator_t *comm

request->super.req_status.MPI_ERROR = OMPI_SUCCESS;

OPAL_THREAD_LOCK(&ompi_request_lock);
ompi_request_complete(&request->super, true);
OPAL_THREAD_UNLOCK(&ompi_request_lock);

OPAL_OUTPUT_VERBOSE((10, ompi_coll_base_framework.framework_output,
"coll:portals4:gather_intra_binomial_bottom exit rank %d", request->u.gather.my_rank));
Expand Down Expand Up @@ -1195,9 +1193,7 @@ ompi_coll_portals4_gather_intra_linear_bottom(struct ompi_communicator_t *comm,

request->super.req_status.MPI_ERROR = OMPI_SUCCESS;

OPAL_THREAD_LOCK(&ompi_request_lock);
ompi_request_complete(&request->super, true);
OPAL_THREAD_UNLOCK(&ompi_request_lock);

OPAL_OUTPUT_VERBOSE((10, ompi_coll_base_framework.framework_output,
"coll:portals4:gather_intra_linear_bottom exit rank %d", request->u.gather.my_rank));
Expand Down
2 changes: 0 additions & 2 deletions ompi/mca/coll/portals4/coll_portals4_reduce.c
Original file line number Diff line number Diff line change
Expand Up @@ -441,9 +441,7 @@ ompi_coll_portals4_ireduce_intra_fini(ompi_coll_portals4_request_t *request)
if (OMPI_SUCCESS != ret)
return ret;

OPAL_THREAD_LOCK(&ompi_request_lock);
ompi_request_complete(&request->super, true);
OPAL_THREAD_UNLOCK(&ompi_request_lock);

return (OMPI_SUCCESS);
}
2 changes: 0 additions & 2 deletions ompi/mca/coll/portals4/coll_portals4_scatter.c
Original file line number Diff line number Diff line change
Expand Up @@ -637,9 +637,7 @@ ompi_coll_portals4_scatter_intra_linear_bottom(struct ompi_communicator_t *comm,

request->super.req_status.MPI_ERROR = OMPI_SUCCESS;

OPAL_THREAD_LOCK(&ompi_request_lock);
ompi_request_complete(&request->super, true);
OPAL_THREAD_UNLOCK(&ompi_request_lock);

OPAL_OUTPUT_VERBOSE((10, ompi_coll_base_framework.framework_output,
"coll:portals4:scatter_intra_linear_bottom exit rank %d", request->u.scatter.my_rank));
Expand Down
2 changes: 0 additions & 2 deletions ompi/mca/osc/portals4/osc_portals4_component.c
Original file line number Diff line number Diff line change
Expand Up @@ -229,9 +229,7 @@ progress_callback(void)
opal_atomic_add_size_t(&req->super.req_status._ucount, ev.mlength);
ops = opal_atomic_add_32(&req->ops_committed, 1);
if (ops == req->ops_expected) {
OPAL_THREAD_LOCK(&ompi_request_lock);
ompi_request_complete(&req->super, true);
OPAL_THREAD_UNLOCK(&ompi_request_lock);
}
}
}
Expand Down
3 changes: 0 additions & 3 deletions ompi/mca/pml/cm/pml_cm_recvreq.c
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,6 @@ mca_pml_cm_recv_request_free(struct ompi_request_t** request)

assert( false == recvreq->req_free_called );

OPAL_THREAD_LOCK(&ompi_request_lock);
recvreq->req_free_called = true;
if( true == recvreq->req_pml_complete ) {
if( MCA_PML_CM_REQUEST_RECV_THIN == recvreq->req_pml_type ) {
Expand All @@ -38,8 +37,6 @@ mca_pml_cm_recv_request_free(struct ompi_request_t** request)
}
}

OPAL_THREAD_UNLOCK(&ompi_request_lock);

*request = MPI_REQUEST_NULL;
return OMPI_SUCCESS;
}
Expand Down
3 changes: 0 additions & 3 deletions ompi/mca/pml/cm/pml_cm_sendreq.c
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,6 @@ mca_pml_cm_send_request_free(struct ompi_request_t** request)
mca_pml_cm_send_request_t* sendreq = *(mca_pml_cm_send_request_t**)request;
assert( false == sendreq->req_base.req_free_called );

OPAL_THREAD_LOCK(&ompi_request_lock);
sendreq->req_base.req_free_called = true;
if( true == sendreq->req_base.req_pml_complete ) {
if( MCA_PML_CM_REQUEST_SEND_THIN == sendreq->req_base.req_pml_type ) {
Expand All @@ -45,8 +44,6 @@ mca_pml_cm_send_request_free(struct ompi_request_t** request)
}
}

OPAL_THREAD_UNLOCK(&ompi_request_lock);

*request = MPI_REQUEST_NULL;
return OMPI_SUCCESS;
}
Expand Down
9 changes: 0 additions & 9 deletions ompi/request/request.c
Original file line number Diff line number Diff line change
Expand Up @@ -33,11 +33,6 @@
#include "ompi/constants.h"

opal_pointer_array_t ompi_request_f_to_c_table = {{0}};
size_t ompi_request_waiting = 0;
size_t ompi_request_completed = 0;
size_t ompi_request_failed = 0;
opal_recursive_mutex_t ompi_request_lock = {{0}};
opal_condition_t ompi_request_cond = {{0}};
ompi_predefined_request_t ompi_request_null = {{{{{0}}}}};
ompi_predefined_request_t *ompi_request_null_addr = &ompi_request_null;
ompi_request_t ompi_request_empty = {{{{0}}}};
Expand Down Expand Up @@ -109,8 +104,6 @@ OBJ_CLASS_INSTANCE(

int ompi_request_init(void)
{
OBJ_CONSTRUCT(&ompi_request_lock, opal_recursive_mutex_t);
OBJ_CONSTRUCT(&ompi_request_cond, opal_condition_t);

OBJ_CONSTRUCT(&ompi_request_null, ompi_request_t);
OBJ_CONSTRUCT(&ompi_request_f_to_c_table, opal_pointer_array_t);
Expand Down Expand Up @@ -186,8 +179,6 @@ int ompi_request_finalize(void)
OBJ_DESTRUCT( &ompi_request_null.request );
OMPI_REQUEST_FINI( &ompi_request_empty );
OBJ_DESTRUCT( &ompi_request_empty );
OBJ_DESTRUCT( &ompi_request_cond );
OBJ_DESTRUCT( &ompi_request_lock );
OBJ_DESTRUCT( &ompi_request_f_to_c_table );
return OMPI_SUCCESS;
}
10 changes: 0 additions & 10 deletions ompi/request/request.h
Original file line number Diff line number Diff line change
Expand Up @@ -315,12 +315,6 @@ typedef struct ompi_request_fns_t {
* Globals used for tracking requests and request completion.
*/
OMPI_DECLSPEC extern opal_pointer_array_t ompi_request_f_to_c_table;
OMPI_DECLSPEC extern size_t ompi_request_waiting;
OMPI_DECLSPEC extern size_t ompi_request_completed;
OMPI_DECLSPEC extern size_t ompi_request_failed;
OMPI_DECLSPEC extern int32_t ompi_request_poll;
OMPI_DECLSPEC extern opal_recursive_mutex_t ompi_request_lock;
OMPI_DECLSPEC extern opal_condition_t ompi_request_cond;
OMPI_DECLSPEC extern ompi_predefined_request_t ompi_request_null;
OMPI_DECLSPEC extern ompi_predefined_request_t *ompi_request_null_addr;
OMPI_DECLSPEC extern ompi_request_t ompi_request_empty;
Expand Down Expand Up @@ -432,10 +426,6 @@ static inline int ompi_request_complete(ompi_request_t* request, bool with_signa
}
} else
request->req_complete = REQUEST_COMPLETED;

if( OPAL_UNLIKELY(MPI_SUCCESS != request->req_status.MPI_ERROR) ) {
ompi_request_failed++;
}
}

return OMPI_SUCCESS;
Expand Down