Commit f0055edc authored by Camille Coti's avatar Camille Coti
Browse files

Combined algorithm

parent df19e6c1
......@@ -28,8 +28,8 @@ NP = 5
MPISRC = masterworker.cpp mw_addslave.cpp \
perf.cpp sequential.cpp tensormatrix_mpi.cpp \
utils.cpp utils_parall.cpp profiling.cpp
#mw_combined.cpp hierarchical.cpp
utils.cpp utils_parall.cpp profiling.cpp mw_combined.cpp
# hierarchical.cpp
MPIOBJ= $(MPISRC:.cpp=.o)
......
......@@ -25,8 +25,8 @@ typedef enum {
/* This one is a "regular" master. It returns either when it is done, or when it decides to switch to another algorithm.
*/
end_code_t multiply_combined_master_initial( tensor3D_t& T, matrix_int_t& J, unsigned int size, gi::ex& Tens, MPI_Comm comm = MPI_COMM_WORLD ) {
unsigned int a1, a2, a3, b1;
end_code_t multiply_combined_master_initial( tensor3D_t& T, unsigned int size, gi::ex& Tens, MPI_Comm comm = MPI_COMM_WORLD ) {
unsigned int a1, a2, a4;
gi::ex A;
gi::lst symbols;
......@@ -52,6 +52,7 @@ end_code_t multiply_combined_master_initial( tensor3D_t& T, matrix_int_t& J, uns
j = 0;
int receivedresults = 0;
unsigned int N = size/2;
std::vector<parameters_t> input;
std::vector<std::string> results_s;
......@@ -59,19 +60,16 @@ end_code_t multiply_combined_master_initial( tensor3D_t& T, matrix_int_t& J, uns
/* Build a list of argument sets */
for( a1 = 0 ; a1 < size; a1++ ){
for( a4 = 0 ; a4 < N ; a4++ ){
i=i+1;
for( a2 = 0; a2 < size ; a2++ ){
for( a2 = 0; a2 < N ; a2++ ){
j=j+1;
for( a3 = 0 ; a3 < size ; a3++ ){
A = T[a1][a2][a3];
for( b1 = 0 ; b1 < size ; b1++ ){
parameters_t p( A, a1, a2, a3, b1 );
for( a1 = 0 ; a1 < N ; a1++ ){
parameters_t p( a4, a2, a1 );
input.push_back( p );
}
}
}
}
/* Compute the set of symbols */
/* Could be done while the first slave is working */
......@@ -99,8 +97,6 @@ end_code_t multiply_combined_master_initial( tensor3D_t& T, matrix_int_t& J, uns
if( !initialround )
t_average = std::accumulate( times.begin(), times.end(), 0.0 )/(double)(np - 1);
std::cout << "wait " << t_wait << std::endl;
if( status.MPI_TAG == TAG_PULL ) {
/* Nothing else will come: just send some work */
......@@ -222,9 +218,9 @@ end_code_t multiply_combined_master_initial( tensor3D_t& T, matrix_int_t& J, uns
/* The traditional slave */
void multiply_combined_slave_initial( tensor3D_t& T, matrix_int_t& J, int size, MPI_Comm comm = MPI_COMM_WORLD ) {
void multiply_combined_slave_initial( tensor3D_t& T, int size, MPI_Comm comm = MPI_COMM_WORLD ) {
gi::ex Tens;
int a1, a2, a3, b1;
int a1, a2, a4;
// gi::ex A;
unsigned int len = 0;
......@@ -255,12 +251,10 @@ void multiply_combined_slave_initial( tensor3D_t& T, matrix_int_t& J, int size,
if( status.MPI_TAG == TAG_WORK ){
a1 = params.a1;
a2 = params.a2;
a3 = params.a3;
b1 = params.b1;
gi::symbol A( std::string( params.A ) );
a4 = params.a4;
t_start = rdtsc();
Tens = one_level1_product( &T, &J, A, size, a1, a2, a3, b1 );
Tens = one_level1_product( &T, size, a4, a2, a1 );
t_compute = rdtsc() - t_start;
/* TODO if we waited for too long */
......@@ -273,7 +267,7 @@ void multiply_combined_slave_initial( tensor3D_t& T, matrix_int_t& J, int size,
/* Receive a set of expressions to add */
/* Number of expressions received */
int nb = params.a1;
int nb = params.a4;
/* Length of each string */
......@@ -315,13 +309,13 @@ void multiply_combined_slave_initial( tensor3D_t& T, matrix_int_t& J, int size,
* Combined master-worker *
*******************************************************************************/
gi::ex multiply_combined_master( tensor3D_t& T, matrix_int_t& J, int size ) { // simpler: same dimension everywhere
gi::ex multiply_combined_master( tensor3D_t& T, int size ) { // simpler: same dimension everywhere
gi::ex Tens = 0;
end_code_t rc;
/* Initially: start as a traditional M/W */
rc = multiply_combined_master_initial( T, J, size, Tens );
rc = multiply_combined_master_initial( T, size, Tens );
switch( rc ){
case FINISHED:
return Tens;
......@@ -330,16 +324,16 @@ gi::ex multiply_combined_master( tensor3D_t& T, matrix_int_t& J, int size ) { /
return Tens;
}
void multiply_combined_worker( tensor3D_t& T, matrix_int_t& J, int size ) { // simpler: same dimension everywhere
void multiply_combined_worker( tensor3D_t& T, int size ) { // simpler: same dimension everywhere
gi::ex Tens = 0;
std::cout << "worker" << std::endl;
multiply_combined_slave_initial( T, J, size );
multiply_combined_slave_initial( T, size );
}
gi::ex multiply_combined( tensor3D_t& T, matrix_int_t& J, int size ) { // simpler: same dimension everywhere
gi::ex multiply_combined( tensor3D_t& T, int size ) { // simpler: same dimension everywhere
int rank;
gi::ex Tens = 0;
......@@ -352,9 +346,9 @@ gi::ex multiply_combined( tensor3D_t& T, matrix_int_t& J, int size ) { // simpl
/* Here we go */
if( 0 == rank ) {
Tens = multiply_combined_master( T, J, size );
Tens = multiply_combined_master( T, size );
} else {
multiply_combined_worker( T, J, size );
multiply_combined_worker( T, size );
}
/* Finalize */
......
......@@ -26,7 +26,7 @@ gi::ex multiply_2levels( tensor3D_t&, matrix_int_t&, int );
gi::ex multiply_1level_mw( tensor3D_t&, int );
gi::ex multiply_1level_mw_addslave( tensor3D_t&, int );
gi::ex multiply_1level_mw_hierarch( tensor3D_t&, matrix_int_t&, int );
gi::ex multiply_combined( tensor3D_t&, matrix_int_t&, int );
gi::ex multiply_combined( tensor3D_t&, int );
/*******************************************************************************
* Default values *
......
......@@ -139,10 +139,10 @@ int main( int argc, char** argv ){
break;
/*case 'h':
Tpara = multiply_1level_mw_hierarch( T, J, N );
break;
case 'c':
Tpara = multiply_combined( T, J, N );
break;*/
case 'c':
Tpara = multiply_combined( T, N );
break;
case 's':
Tpara = multiply_seq( T, J, N );
break;
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment