Commit 95d63bb1 authored by Camille Coti's avatar Camille Coti
Browse files

Beginning of the hierarchical parallelization

parent aba1b02e
......@@ -360,7 +360,8 @@ gi::ex multiply_1level_mw_hierarch( tensor3D_t& T, matrix_int_t& J, int size ) {
/* Create new datatypes for the parameters */
create_parameters_datatye();
create_parameters_datatye_2();
create_parameters_datatye_2_1();
create_parameters_datatye_2_2();
/* Create the communicators */
......@@ -388,7 +389,8 @@ gi::ex multiply_1level_mw_hierarch( tensor3D_t& T, matrix_int_t& J, int size ) {
/* Finalize */
free_parameters_dt( );
free_parameters_2_dt( );
free_parameters_2_1_dt( );
free_parameters_2_2_dt( );
return Tens;
}
......@@ -19,14 +19,14 @@ typedef std::vector<int> vector_int_t;
*******************************************************************************/
// sequential
gi::ex multiply_seq( tensor3D_t&, matrix_int_t&, int );
gi::ex multiply_1level( tensor3D_t&, matrix_int_t&, int );
gi::ex multiply_2levels( tensor3D_t&, matrix_int_t&, int );
gi::ex multiply_seq( tensor3D_t&, int );
gi::ex multiply_1level( tensor3D_t&, int );
gi::ex multiply_2levels( tensor3D_t&, int );
// parallel
gi::ex multiply_1level_mw( tensor3D_t&, int );
gi::ex multiply_1level_mw_addslave( tensor3D_t&, int );
gi::ex multiply_1level_mw_hierarch( tensor3D_t&, matrix_int_t&, int );
gi::ex multiply_combined( tensor3D_t&, matrix_int_t&, int );
//gi::ex multiply_1level_mw_hierarch( tensor3D_t&, int );
//gi::ex multiply_combined( tensor3D_t&, int );
/*******************************************************************************
* Default values *
......
......@@ -26,16 +26,14 @@ parameters_t::parameters_t( unsigned int a4, unsigned int a2, unsigned int a1 )
this->a1 = a1;
}
parameters_2_t::parameters_2_t( gi::ex A, unsigned int a1, unsigned int a2, unsigned int a3, unsigned int b1, unsigned int b2, unsigned int b3, unsigned int c1, unsigned int c2 ){
memcpy( this->A, linearize_expression( A ).c_str(), 6 );
this->a1 = a1;
parameters_2_1_t::parameters_2_1_t( unsigned int a4, unsigned int a2 ){
this->a4 = a4;
this->a2 = a2;
this->a3 = a3;
this->b1 = b1;
this->b2 = b2;
this->b3 = b3;
this->c1 = c1;
this->c2 = c2;
}
/* Constructor for a level-2 work-unit descriptor.
   BUG FIX: the original signature kept the copy-pasted parameter names
   (a4, a2) from parameters_2_1_t, so "this->a6 = a6;" and
   "this->a1 = a1;" resolved both sides to the *member* (self-assignment
   no-ops) and left a6/a1 indeterminate. The parameters are renamed to
   match the members they initialize; the header declares them unnamed,
   so no caller is affected. */
parameters_2_2_t::parameters_2_2_t( unsigned int a6, unsigned int a1 ){
    this->a6 = a6;
    this->a1 = a1;
}
void create_parameters_datatye(){
......@@ -43,17 +41,26 @@ void create_parameters_datatye(){
MPI_Type_commit( &DT_PARAMETERS );
}
/* Builds the MPI datatype used to exchange parameters_2_1_t structs:
   two contiguous unsigned ints (matching members a4, a2). Commits the
   global DT_PARAMETERS_2_1; must run on every rank before any send/recv
   that uses it. Counterpart of free_parameters_2_1_dt().
   NOTE(review): "datatye" is a long-standing typo in this family of
   function names; kept as-is because callers depend on it. */
void create_parameters_datatye_2_1(){
MPI_Type_contiguous( 2, MPI_UNSIGNED, &DT_PARAMETERS_2_1 );
MPI_Type_commit( &DT_PARAMETERS_2_1 );
}
void create_parameters_datatye_2(){
MPI_Type_contiguous( 4, MPI_UNSIGNED, &DT_PARAMETERS );
MPI_Type_commit( &DT_PARAMETERS_2 );
MPI_Type_contiguous( 2, MPI_UNSIGNED, &DT_PARAMETERS_2_2 );
MPI_Type_commit( &DT_PARAMETERS_2_2 );
}
/* Releases the committed MPI datatype for parameters_t (DT_PARAMETERS).
   Call once at finalization, after all communication using it is done. */
void free_parameters_dt( ){
MPI_Type_free( &DT_PARAMETERS );
}
void free_parameters_2_dt( ){
MPI_Type_free( &DT_PARAMETERS_2 );
/* Releases the committed MPI datatype for parameters_2_1_t
   (DT_PARAMETERS_2_1). Counterpart of create_parameters_datatye_2_1(). */
void free_parameters_2_1_dt( ){
MPI_Type_free( &DT_PARAMETERS_2_1 );
}
/* Releases the committed MPI datatype for parameters_2_2_t
   (DT_PARAMETERS_2_2). Counterpart of create_parameters_datatye_2_2(). */
void free_parameters_2_2_dt( ){
MPI_Type_free( &DT_PARAMETERS_2_2 );
}
gi::ex add_expressions( std::vector<std::string> expressions, gi::lst symbols ) {
......@@ -74,7 +81,7 @@ void send_end( int peer, MPI_Comm comm ) {
void send_end_batch( int peer, MPI_Comm comm ) {
parameters_t para;
MPI_Send( &para, 1, DT_PARAMETERS_2, peer, TAG_END_BATCH, comm );
MPI_Send( &para, 1, DT_PARAMETERS_2_1, peer, TAG_END_BATCH, comm );
}
/* M -> W: Send some work: just a parameter set */
......@@ -85,10 +92,10 @@ void send_work( std::vector<parameters_t>& input, int peer, MPI_Comm comm ){
MPI_Send( &para, 1, DT_PARAMETERS, peer, TAG_WORK, comm );
}
void send_work( std::vector<parameters_2_t>& input, int peer, MPI_Comm comm ){
parameters_2_t para = input.back();
void send_work( std::vector<parameters_2_1_t>& input, int peer, MPI_Comm comm ){
parameters_2_1_t para = input.back();
input.pop_back();
MPI_Send( &para, 1, DT_PARAMETERS_2, peer, TAG_WORK, comm );
MPI_Send( &para, 1, DT_PARAMETERS_2_1, peer, TAG_WORK, comm );
}
/* M -> W: Send a set of expressions to be added */
......@@ -132,7 +139,7 @@ void send_add_or_end_addslave( std::vector<std::string>& results, int peer, int
send_expressions_to_add( results, peer );
} else {
send_end( peer );
(*running)--;
(*running)--;
}
}
......
......@@ -15,13 +15,19 @@ public:
parameters_t( void ){};
};
class parameters_2_t{
class parameters_2_1_t{
public:
char A[6]; // A is always a simple symbol, of form T_xyz.
unsigned int a1, a2, a3, b1, b2, b3, c1, c2;
parameters_2_t( gi::ex, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int );
parameters_2_t( void ){};
unsigned int a4, a2;
parameters_2_1_t( unsigned int, unsigned int );
parameters_2_1_t( void ){};
};
/* Plain-data work-unit descriptor exchanged over MPI as DT_PARAMETERS_2_2
   (two contiguous unsigned ints). Presumably carries two loop/tensor
   indices for the second level of the hierarchical master/worker scheme
   -- TODO confirm against the dispatch code. */
class parameters_2_2_t{
public:
unsigned int a6, a1;
parameters_2_2_t( unsigned int, unsigned int );
/* Default ctor leaves a6/a1 uninitialized; intended for use as a
   receive buffer that MPI fills in. */
parameters_2_2_t( void ){};
};
/*******************************************************************************
* Prototypes *
......@@ -31,7 +37,7 @@ std::string linearize_expression( gi::ex );
gi::ex de_linearize_expression( std::string, gi::lst );
void send_work( std::vector<parameters_t>& input, int peer, MPI_Comm comm = MPI_COMM_WORLD );
void send_work( std::vector<parameters_2_t>& input, int peer, MPI_Comm comm = MPI_COMM_WORLD );
void send_work( std::vector<parameters_2_2_t>& input, int peer, MPI_Comm comm = MPI_COMM_WORLD );
void send_expressions_to_add( std::vector<std::string>&, int );
void send_add_or_end_addslave( std::vector<std::string>&, int, int* );
......@@ -41,9 +47,11 @@ void send_end( int peer, MPI_Comm comm = MPI_COMM_WORLD );
void send_end_batch( int peer, MPI_Comm comm = MPI_COMM_WORLD );
void create_parameters_datatye( void );
void create_parameters_datatye_2( void );
void create_parameters_datatye_2_1( void );
void create_parameters_datatye_2_2( void );
void free_parameters_dt( void );
void free_parameters_2_dt( void );
void free_parameters_2_1_dt( void );
void free_parameters_2_2_dt( void );
gi::ex add_expressions( std::vector<std::string>, gi::lst );
......@@ -52,7 +60,8 @@ gi::ex add_expressions( std::vector<std::string>, gi::lst );
*******************************************************************************/
extern MPI_Datatype DT_PARAMETERS;
extern MPI_Datatype DT_PARAMETERS_2;
extern MPI_Datatype DT_PARAMETERS_2_1;
extern MPI_Datatype DT_PARAMETERS_2_2;
extern unsigned int nbforemen; /* Number of foremen to use with the hierarchical M/W */
extern unsigned int maxresult; /* Maximum results in the result queue, addslave version */
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment