diff --git a/src/mw_addslave.cpp b/src/mw_addslave.cpp
index 56c398eb3bb1a17b394f4ca0e531433ce7aad1f8..6bd37dd48306ed307ad5e36144fed8f4504bf2b8 100644
--- a/src/mw_addslave.cpp
+++ b/src/mw_addslave.cpp
@@ -92,7 +92,6 @@ gi::ex multiply_1level_master_addslave( tensor3D_t& T, matrix_int_t& J, unsigned
 
             /* Send more work */
             send_work_addslave( input, results, src );
-
         } else {
             std::cerr << "Wrong tag received " << status.MPI_TAG << std::endl;
         }
diff --git a/src/utils_parall.cpp b/src/utils_parall.cpp
index 5fdb2af05594cf83ef27c7598e7f7e4a2e0f423b..503ac709679fc8720b8711f3eb0e53250c1f8e69 100644
--- a/src/utils_parall.cpp
+++ b/src/utils_parall.cpp
@@ -151,15 +151,16 @@ void send_expressions_to_add( std::vector<std::string>& results, int peer ) {
 
 /* M -> W: Send either a set of expressions to add, or the end signal */
 
-void send_add_or_end_addslave( std::vector<std::string> results, int peer, int* running ){
+void send_add_or_end_addslave( std::vector<std::string>& results, int peer, int* running ){
 
     /* Do I have a lot of results to be treated in the result queue? */
+
     if( results.size() > MAXRESULT ) {
         /* if the result queue is too big, send it */
         send_expressions_to_add( results, peer );
     } else {
         send_end( peer );
-	(*running)--;
+        (*running)--;
     }
 }
 
diff --git a/src/utils_parall.h b/src/utils_parall.h
index e0dc6f1fadc585cf10de1900e6d7336d0579c0dc..24350efa192e4f680fe24f5a9c27a072c25220b0 100644
--- a/src/utils_parall.h
+++ b/src/utils_parall.h
@@ -35,7 +35,7 @@ void send_work( std::vector<parameters_t>& input, int peer, MPI_Comm comm = MPI_
 void send_work( std::vector<parameters_2_t>& input, int peer, MPI_Comm comm = MPI_COMM_WORLD );
 
 void send_expressions_to_add( std::vector<std::string>&, int );
-void send_add_or_end_addslave( std::vector<std::string>, int, int* );
+void send_add_or_end_addslave( std::vector<std::string>&, int, int* );
 void send_work_addslave( std::vector<parameters_t>&, std::vector<std::string>&, int ) ;
 void send_result( gi::ex T, MPI_Comm comm = MPI_COMM_WORLD );
 void send_end( int peer, MPI_Comm comm = MPI_COMM_WORLD );
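
Note on the signature change (not part of the patch itself): send_add_or_end_addslave previously took the result queue by value, so each call copied every accumulated expression string, and anything the callee did to the queue was done to that copy rather than to the master's queue. The stand-alone snippet below is a hypothetical illustration, not code from this repository; the names drain_by_value and drain_by_ref are invented for the example.

// Hypothetical illustration (not repository code): with pass-by-value the callee
// only ever touches a private copy of the queue, so the caller's queue is never
// drained and every call pays for copying all of the stored expression strings.
#include <iostream>
#include <string>
#include <vector>

void drain_by_value( std::vector<std::string> q )  { q.clear(); }   // clears a private copy only
void drain_by_ref  ( std::vector<std::string>& q ) { q.clear(); }   // clears the caller's queue

int main() {
    std::vector<std::string> results = { "expr1", "expr2" };
    drain_by_value( results );
    std::cout << "after by-value:     " << results.size() << " entries left" << std::endl; // still 2
    drain_by_ref( results );
    std::cout << "after by-reference: " << results.size() << " entries left" << std::endl; // now 0
    return 0;
}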