Commit df19e6c1 authored by Camille Coti

Implemented the master-worker pattern with addition on a slave

parent a51b9c94
@@ -26,10 +26,10 @@ LDOPT = -lginac $(TAULIB)
 MPIEXEC = mpiexec
 NP = 5
 
-MPISRC = masterworker.cpp \
+MPISRC = masterworker.cpp mw_addslave.cpp \
 	perf.cpp sequential.cpp tensormatrix_mpi.cpp \
 	utils.cpp utils_parall.cpp profiling.cpp
-#mw_combined.cpp mw_addslave.cpp hierarchical.cpp
+#mw_combined.cpp hierarchical.cpp
 
 MPIOBJ= $(MPISRC:.cpp=.o)
...
@@ -54,7 +54,6 @@ gi::ex multiply_1level_master( tensor3D_t& T, unsigned int size, MPI_Comm comm =
 	    }
 	}
     }
-
     /* Compute the set of symbols */
     /* Could be done while the first slave is working */
...
@@ -15,9 +15,9 @@ namespace gi = GiNaC;
  * Parallel 1-level decomposition with addition on a slave                    *
  *******************************************************************************/
 
-gi::ex multiply_1level_master_addslave( tensor3D_t& T, matrix_int_t& J, unsigned int size, MPI_Comm comm = MPI_COMM_WORLD ) {
+gi::ex multiply_1level_master_addslave( tensor3D_t& T, unsigned int size, MPI_Comm comm = MPI_COMM_WORLD ) {
     gi::ex Tens = 0;
-    unsigned int a1, a2, a3, b1;
+    unsigned int a1, a2, a4;
     gi::ex A;
     gi::lst symbols;
@@ -37,24 +37,22 @@ gi::ex multiply_1level_master_addslave( tensor3D_t& T, matrix_int_t& J, unsigned
     j = 0;
     int receivedresults = 0;
+    unsigned int N = size/2;
 
     std::vector<parameters_t> input;
     std::vector<std::string> results; /* length and char* */
 
     /* Build a list of argument sets */
-    for( a1 = 0 ; a1 < size; a1++ ){
+    for( a4 = 0 ; a4 < N ; a4++ ){
 	i=i+1;
-	for( a2 = 0; a2 < size ; a2++ ){
+	for( a2 = 0; a2 < N ; a2++ ){
 	    j=j+1;
-	    for( a3 = 0 ; a3 < size ; a3++ ){
-		A = T[a1][a2][a3];
-		for( b1 = 0 ; b1 < size ; b1++ ){
-		    parameters_t p( A, a1, a2, a3, b1 );
-		    input.push_back( p );
-		}
-	    }
-	}
+	    for( a1 = 0 ; a1 < N ; a1++ ){
+		parameters_t p( a4, a2, a1 );
+		input.push_back( p );
+	    }
+	}
     }
 
     /* Compute the set of symbols */
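For reference, a minimal standalone sketch of the new task enumeration. It assumes parameters_t reduces to the three loop indices (in the project the same struct also backs the DT_PARAMETERS MPI datatype), and it just counts the N^3 tasks the master queues; this is a sketch, not the committed code.

#include <cstdio>
#include <vector>

struct parameters_t {                  // hypothetical stand-in for the project's type
    unsigned int a4, a2, a1;
    parameters_t( unsigned int a4, unsigned int a2, unsigned int a1 )
        : a4( a4 ), a2( a2 ), a1( a1 ) {}
};

int main() {
    unsigned int size = 4;             // example tensor dimension
    unsigned int N = size / 2;         // the commit iterates over half the range
    std::vector<parameters_t> input;

    /* One task per (a4, a2, a1) triple: N^3 tasks replace the old size^4 loop nest */
    for( unsigned int a4 = 0 ; a4 < N ; a4++ )
        for( unsigned int a2 = 0 ; a2 < N ; a2++ )
            for( unsigned int a1 = 0 ; a1 < N ; a1++ )
                input.push_back( parameters_t( a4, a2, a1 ) );

    std::printf( "%zu tasks queued\n", input.size() );
    return 0;
}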
@@ -137,9 +135,9 @@ gi::ex multiply_1level_master_addslave( tensor3D_t& T, matrix_int_t& J, unsigned
     return Tens;
 }
 
-void multiply_1level_slave_addslave( tensor3D_t& T, matrix_int_t& J, unsigned int size, MPI_Comm comm = MPI_COMM_WORLD ) {
+void multiply_1level_slave_addslave( tensor3D_t& T, unsigned int size, MPI_Comm comm = MPI_COMM_WORLD ) {
     gi::ex Tens;
-    int a1, a2, a3, b1;
+    int a1, a2, a4;
     //    gi::ex A;
     unsigned int len = 0;
@@ -164,13 +162,11 @@ void multiply_1level_slave_addslave( tensor3D_t& T, matrix_int_t& J, unsigned in
 	MPI_Recv( &params, 1, DT_PARAMETERS, ROOT, MPI_ANY_TAG, comm, &status );
 
 	if( status.MPI_TAG == TAG_WORK ){
-	    a1 = params.a1;
+	    a4 = params.a4;
 	    a2 = params.a2;
-	    a3 = params.a3;
-	    b1 = params.b1;
-	    gi::symbol A( std::string( params.A ) );
+	    a1 = params.a1;
 
-	    Tens = one_level1_product( &T, &J, A, size, a1, a2, a3, b1 );
+	    Tens = one_level1_product( &T, size, a4, a2, a1 );
 	    send_result( Tens );
 	} else {
@@ -178,7 +174,7 @@ void multiply_1level_slave_addslave( tensor3D_t& T, matrix_int_t& J, unsigned in
 	    /* Receive a set of expressions to add */
 
 	    /* Number of expressions received */
-	    int nb = params.a1;
+	    int nb = params.a4;
 
 	    /* Length of each string */
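The TAG_ADD branch above receives nb = params.a4 serialized expressions. A minimal GiNaC-only sketch of the parse-and-accumulate step on the addition slave, assuming expressions travel as plain strings and the slave holds the same symbol list as the master:

#include <ginac/ginac.h>
#include <iostream>
#include <string>
#include <vector>
namespace gi = GiNaC;

int main() {
    gi::symbol x( "x" ), y( "y" );
    gi::lst symbols;
    symbols.append( x );
    symbols.append( y );                  /* must match the master's symbol set */

    /* Stand-ins for the nb buffers received under TAG_ADD */
    std::vector<std::string> received;
    received.push_back( "2*x+y" );
    received.push_back( "x*y" );
    received.push_back( "-y" );

    gi::ex Tens = 0;
    for( size_t i = 0 ; i < received.size() ; i++ )
        Tens += gi::ex( received[i], symbols );   /* parse, then accumulate */

    std::cout << Tens << std::endl;       /* 2*x+x*y (term order may vary) */
    return 0;
}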
@@ -221,7 +217,7 @@ void multiply_1level_slave_addslave( tensor3D_t& T, matrix_int_t& J, unsigned in
    W -> M: send an unsigned int (size of the expression), then the expression (table of chars)
 */
 
-gi::ex multiply_1level_mw_addslave( tensor3D_t& T, matrix_int_t& J, int size ) { // simpler: same dimension everywhere
+gi::ex multiply_1level_mw_addslave( tensor3D_t& T, int size ) { // simpler: same dimension everywhere
     int rank;
     gi::ex Tens = 0;
     MPI_Comm_rank( MPI_COMM_WORLD, &rank );
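A sketch of the W -> M protocol described in the comment above: the worker sends the expression's length as an unsigned int, then the characters themselves. The tag value and the use of a ready-made string in place of GiNaC serialization are assumptions, not the project's code.

#include <mpi.h>
#include <cstdio>
#include <string>
#include <vector>

const int TAG_RES = 1;   /* hypothetical tag; the project defines its own */

int main( int argc, char** argv ) {
    MPI_Init( &argc, &argv );
    int rank, np;
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    MPI_Comm_size( MPI_COMM_WORLD, &np );

    if( rank != 0 ) {                       /* worker: length first, then chars */
        std::string result = "2*x+y";       /* stand-in for a serialized gi::ex */
        unsigned int len = result.length();
        MPI_Send( &len, 1, MPI_UNSIGNED, 0, TAG_RES, MPI_COMM_WORLD );
        MPI_Send( const_cast<char*>( result.c_str() ), len, MPI_CHAR,
                  0, TAG_RES, MPI_COMM_WORLD );
    } else {                                /* master: mirror the two receives */
        for( int w = 1 ; w < np ; w++ ) {
            unsigned int len;
            MPI_Status status;
            MPI_Recv( &len, 1, MPI_UNSIGNED, MPI_ANY_SOURCE, TAG_RES,
                      MPI_COMM_WORLD, &status );
            std::vector<char> buf( len + 1, '\0' );
            MPI_Recv( buf.data(), len, MPI_CHAR, status.MPI_SOURCE, TAG_RES,
                      MPI_COMM_WORLD, MPI_STATUS_IGNORE );
            std::printf( "from %d: %s\n", status.MPI_SOURCE, buf.data() );
        }
    }
    MPI_Finalize();
    return 0;
}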
@@ -233,9 +229,9 @@ gi::ex multiply_1level_mw_addslave( tensor3D_t& T, matrix_int_t& J, int size ) {
     /* Here we go */
 
     if( 0 == rank ) {
-	Tens = multiply_1level_master_addslave( T, J, size );
+	Tens = multiply_1level_master_addslave( T, size );
     } else {
-	multiply_1level_slave_addslave( T, J, size );
+	multiply_1level_slave_addslave( T, size );
     }
 
     /* Finalize */
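Stripped of the tensor algebra, the dispatch above is the whole master-worker pattern. A self-contained skeleton under assumed tag values, with the master's work distribution reduced to one token per worker:

#include <mpi.h>
#include <cstdio>

const int TAG_WORK = 1;   /* hypothetical tag values; the project defines its own */
const int TAG_END  = 2;

int main( int argc, char** argv ) {
    MPI_Init( &argc, &argv );
    int rank, np;
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    MPI_Comm_size( MPI_COMM_WORLD, &np );

    if( 0 == rank ) {
        /* master: send work, then the end tag, to every worker */
        int work = 42;
        for( int w = 1 ; w < np ; w++ )
            MPI_Send( &work, 1, MPI_INT, w, TAG_WORK, MPI_COMM_WORLD );
        for( int w = 1 ; w < np ; w++ )
            MPI_Send( &work, 0, MPI_INT, w, TAG_END, MPI_COMM_WORLD );
    } else {
        /* worker: loop on MPI_Recv and dispatch on the tag, like the slave above */
        while( 1 ) {
            int work;
            MPI_Status status;
            MPI_Recv( &work, 1, MPI_INT, 0, MPI_ANY_TAG, MPI_COMM_WORLD, &status );
            if( status.MPI_TAG == TAG_END ) break;
            std::printf( "rank %d got work %d\n", rank, work );
        }
    }

    MPI_Finalize();
    return 0;
}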
...
@@ -24,7 +24,7 @@ gi::ex multiply_1level( tensor3D_t&, matrix_int_t&, int );
 gi::ex multiply_2levels( tensor3D_t&, matrix_int_t&, int );
 
 // parallel
 gi::ex multiply_1level_mw( tensor3D_t&, int );
-gi::ex multiply_1level_mw_addslave( tensor3D_t&, matrix_int_t&, int );
+gi::ex multiply_1level_mw_addslave( tensor3D_t&, int );
 gi::ex multiply_1level_mw_hierarch( tensor3D_t&, matrix_int_t&, int );
 gi::ex multiply_combined( tensor3D_t&, matrix_int_t&, int );
...
@@ -134,10 +134,10 @@ int main( int argc, char** argv ){
     case 'm':
 	Tpara = multiply_1level_mw( T, N );
 	break;
-	/*    case 'a':
-	Tpara = multiply_1level_mw_addslave( T, J, N );
+    case 'a':
+	Tpara = multiply_1level_mw_addslave( T, N );
 	break;
-    case 'h':
+	/*case 'h':
 	Tpara = multiply_1level_mw_hierarch( T, J, N );
 	break;
     case 'c':
...
@@ -98,7 +98,7 @@ void send_expressions_to_add( std::vector<std::string>& results, int peer ) {
     /* Fill a bogus parameter object */
     int nb = results.size();
     int i;
-    parameters_t p( 0, 0, 0 );
+    parameters_t p( nb, 0, 0 );
     char* expr;
 
     MPI_Send( &p, 1, DT_PARAMETERS, peer, TAG_ADD, MPI_COMM_WORLD );
@@ -115,7 +115,7 @@ void send_expressions_to_add( std::vector<std::string>& results, int peer ) {
 	expr = const_cast<char*>( results[i].c_str() );
 	MPI_Send( expr, results[i].length(), MPI_CHAR, peer, TAG_ADD, MPI_COMM_WORLD );
     }
     results.erase( results.begin(), results.end() );
 
     free( lengths );
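Putting that fix in context, a self-contained sketch of the framing send_expressions_to_add uses: a bogus parameters_t whose first field now carries the expression count (so the slave's nb = params.a4 is meaningful), then one array of lengths, then the strings. The struct layout, tag value, and the use of MPI_BYTE in place of the project's DT_PARAMETERS datatype are stand-ins.

#include <mpi.h>
#include <cstdio>
#include <cstdlib>
#include <string>
#include <vector>

struct parameters_t { unsigned int a4, a2, a1; };   /* hypothetical stand-in */
const int TAG_ADD = 2;                              /* hypothetical tag value */

void send_expressions_to_add( std::vector<std::string>& results, int peer ) {
    int nb = results.size();
    int i;

    /* Fill a bogus parameter object: a4 carries the expression count */
    parameters_t p = { (unsigned int) nb, 0, 0 };
    MPI_Send( &p, sizeof( p ), MPI_BYTE, peer, TAG_ADD, MPI_COMM_WORLD );

    /* Length of each string, sent in one shot */
    unsigned int* lengths = (unsigned int*) malloc( nb * sizeof( unsigned int ) );
    for( i = 0 ; i < nb ; i++ ) lengths[i] = results[i].length();
    MPI_Send( lengths, nb, MPI_UNSIGNED, peer, TAG_ADD, MPI_COMM_WORLD );

    /* The strings themselves */
    for( i = 0 ; i < nb ; i++ )
        MPI_Send( const_cast<char*>( results[i].c_str() ), lengths[i],
                  MPI_CHAR, peer, TAG_ADD, MPI_COMM_WORLD );

    results.erase( results.begin(), results.end() );
    free( lengths );
}

int main( int argc, char** argv ) {
    MPI_Init( &argc, &argv );
    int rank;
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );

    if( rank == 0 ) {
        std::vector<std::string> results;
        results.push_back( "2*x+y" );
        results.push_back( "x*y" );
        send_expressions_to_add( results, 1 );
    } else if( rank == 1 ) {
        /* Mirror image of the framing: count, lengths, then each buffer */
        parameters_t p;
        MPI_Status status;
        MPI_Recv( &p, sizeof( p ), MPI_BYTE, 0, TAG_ADD, MPI_COMM_WORLD, &status );
        int nb = p.a4;
        std::vector<unsigned int> lengths( nb );
        MPI_Recv( lengths.data(), nb, MPI_UNSIGNED, 0, TAG_ADD, MPI_COMM_WORLD, &status );
        for( int i = 0 ; i < nb ; i++ ) {
            std::vector<char> buf( lengths[i] + 1, '\0' );
            MPI_Recv( buf.data(), lengths[i], MPI_CHAR, 0, TAG_ADD, MPI_COMM_WORLD, &status );
            std::printf( "to add: %s\n", buf.data() );
        }
    }
    MPI_Finalize();
    return 0;
}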