TensorJoseph

Commit df19e6c1 authored Jan 16, 2020 by Camille Coti
Implemented the master-worker pattern with addition on a slave
parent a51b9c94

Changes 6
src/Makefile

@@ -26,10 +26,10 @@ LDOPT = -lginac $(TAULIB)
 MPIEXEC = mpiexec
 NP = 5
 
-MPISRC = masterworker.cpp \
+MPISRC = masterworker.cpp mw_addslave.cpp \
 	perf.cpp sequential.cpp tensormatrix_mpi.cpp \
 	utils.cpp utils_parall.cpp profiling.cpp
-#mw_combined.cpp mw_addslave.cpp hierarchical.cpp
+#mw_combined.cpp hierarchical.cpp
 
 MPIOBJ = $(MPISRC:.cpp=.o)
src/masterworker.cpp

@@ -54,7 +54,6 @@ gi::ex multiply_1level_master( tensor3D_t& T, unsigned int size, MPI_Comm comm =
             }
         }
     }
     /* Compute the set of symbols */
     /* Could be done while the first slave is working */
src/mw_addslave.cpp

@@ -15,9 +15,9 @@ namespace gi = GiNaC;
  * Parallel 1-level decomposition with addition on a slave                      *
  *******************************************************************************/
-gi::ex multiply_1level_master_addslave( tensor3D_t& T, matrix_int_t& J, unsigned int size, MPI_Comm comm = MPI_COMM_WORLD ) {
+gi::ex multiply_1level_master_addslave( tensor3D_t& T, unsigned int size, MPI_Comm comm = MPI_COMM_WORLD ) {
     gi::ex Tens = 0;
-    unsigned int a1, a2, a3, b1;
+    unsigned int a1, a2, a4;
     gi::ex A;
     gi::lst symbols;
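
The new signature drops the matrix_int_t& J argument, and each work unit is now described by three loop indices (a4, a2, a1) instead of a symbol plus four indices. The project's actual parameters_t and DT_PARAMETERS definitions are not part of this diff; the following is only a hypothetical sketch of how such a reduced, plain-old-data parameter block can be mapped to an MPI datatype.

    #include <mpi.h>

    // Hypothetical reduced parameter block; the real parameters_t in this
    // repository may have more fields and a different layout.
    struct parameters_t {
        unsigned int a4, a2, a1;   // loop indices shipped to a worker
        parameters_t() : a4( 0 ), a2( 0 ), a1( 0 ) {}
        parameters_t( unsigned int _a4, unsigned int _a2, unsigned int _a1 )
            : a4( _a4 ), a2( _a2 ), a1( _a1 ) {}
    };

    MPI_Datatype DT_PARAMETERS;    // committed once, reused by every send/recv

    void create_parameters_datatype() {
        // Three unsigned ints laid out contiguously, so a contiguous type is enough.
        MPI_Type_contiguous( 3, MPI_UNSIGNED, &DT_PARAMETERS );
        MPI_Type_commit( &DT_PARAMETERS );
    }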

@@ -37,24 +37,22 @@ gi::ex multiply_1level_master_addslave( tensor3D_t& T, matrix_int_t& J, unsigned
     j = 0;
     int receivedresults = 0;
     unsigned int N = size / 2;
 
     std::vector<parameters_t> input;
     std::vector<std::string> results; /* length and char* */
 
     /* Build a list of argument sets */
 
-    for( a1 = 0 ; a1 < size ; a1++ ){
+    for( a4 = 0 ; a4 < N ; a4++ ){
         i = i + 1;
-        for( a2 = 0 ; a2 < size ; a2++ ){
-            j = j + 1;
-            for( a3 = 0 ; a3 < size ; a3++ ){
-                A = T[a1][a2][a3];
-                for( b1 = 0 ; b1 < size ; b1++ ){
-                    parameters_t p( A, a1, a2, a3, b1 );
-                    input.push_back( p );
-                }
-            }
-        }
+        for( a2 = 0 ; a2 < N ; a2++ ){
+            j = j + 1;
+            for( a1 = 0 ; a1 < N ; a1++ ){
+                parameters_t p( a4, a2, a1 );
+                input.push_back( p );
+            }
+        }
     }
 
     /* Compute the set of symbols */
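
Once input holds one parameters_t per task, the master's job is the usual master-worker loop: keep every worker busy, hand out the next task whenever a result comes back, then send a termination tag. That loop sits outside this hunk; the sketch below is a generic version with an illustrative numeric payload and assumed tag names, not the project's exact protocol (the real code receives serialized GiNaC expressions back).

    #include <mpi.h>
    #include <vector>

    // Generic master-side dispatch loop (sketch). TAG_* values, DT_PARAMETERS
    // and the double payload are placeholders.
    enum { TAG_WORK = 1, TAG_RESULT = 2, TAG_END = 3 };
    struct parameters_t { unsigned int a4, a2, a1; };

    double dispatch_all( std::vector<parameters_t>& input, MPI_Datatype DT_PARAMETERS, MPI_Comm comm ) {
        int np;
        MPI_Comm_size( comm, &np );
        MPI_Status status;
        double total = 0.0, partial;
        size_t next = 0;

        /* Prime every worker with one task */
        for( int peer = 1 ; peer < np && next < input.size() ; peer++, next++ )
            MPI_Send( &input[next], 1, DT_PARAMETERS, peer, TAG_WORK, comm );

        /* Every received result frees its sender for the next task */
        for( size_t received = 0 ; received < input.size() ; received++ ){
            MPI_Recv( &partial, 1, MPI_DOUBLE, MPI_ANY_SOURCE, TAG_RESULT, comm, &status );
            total += partial;
            if( next < input.size() )
                MPI_Send( &input[next++], 1, DT_PARAMETERS, status.MPI_SOURCE, TAG_WORK, comm );
        }

        /* No work left: release the workers */
        parameters_t stop = { 0, 0, 0 };
        for( int peer = 1 ; peer < np ; peer++ )
            MPI_Send( &stop, 1, DT_PARAMETERS, peer, TAG_END, comm );
        return total;
    }

In mw_addslave.cpp the payload is a string carrying a serialized expression, and a TAG_ADD message can also hand a worker a batch of partial results to sum, which is the point of this commit.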

@@ -137,9 +135,9 @@ gi::ex multiply_1level_master_addslave( tensor3D_t& T, matrix_int_t& J, unsigned
     return Tens;
 }
 
-void multiply_1level_slave_addslave( tensor3D_t& T, matrix_int_t& J, unsigned int size, MPI_Comm comm = MPI_COMM_WORLD ) {
+void multiply_1level_slave_addslave( tensor3D_t& T, unsigned int size, MPI_Comm comm = MPI_COMM_WORLD ) {
     gi::ex Tens;
-    int a1, a2, a3, b1;
+    int a1, a2, a4;
     //    gi::ex A;
     unsigned int len = 0;

@@ -164,13 +162,11 @@ void multiply_1level_slave_addslave( tensor3D_t& T, matrix_int_t& J, unsigned in
         MPI_Recv( &params, 1, DT_PARAMETERS, ROOT, MPI_ANY_TAG, comm, &status );
 
         if( status.MPI_TAG == TAG_WORK ){
-            a1 = params.a1;
+            a4 = params.a4;
             a2 = params.a2;
-            a3 = params.a3;
-            b1 = params.b1;
-            gi::symbol A( std::string( params.A ) );
+            a1 = params.a1;
 
-            Tens = one_level1_product( &T, &J, A, size, a1, a2, a3, b1 );
+            Tens = one_level1_product( &T, size, a4, a2, a1 );
             send_result( Tens );
         } else {
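
The worker side, of which only the TAG_WORK branch is visible in this hunk, is a loop that blocks on MPI_Recv with MPI_ANY_TAG and branches on status.MPI_TAG. A stripped-down skeleton of that loop, with the tensor product replaced by a stub and the constants assumed as stand-ins for the ones defined in the project's headers:

    #include <mpi.h>

    // Worker loop skeleton (sketch): the computation is stubbed and the
    // constants are illustrative stand-ins.
    struct parameters_t { unsigned int a4, a2, a1; };
    enum { TAG_WORK = 1, TAG_ADD = 2, TAG_RESULT = 3, TAG_END = 4 };
    const int ROOT = 0;

    void worker_loop( MPI_Datatype DT_PARAMETERS, MPI_Comm comm ) {
        parameters_t params;
        MPI_Status status;

        while( true ){
            MPI_Recv( &params, 1, DT_PARAMETERS, ROOT, MPI_ANY_TAG, comm, &status );

            if( status.MPI_TAG == TAG_WORK ){
                /* Compute a partial result from the received indices (stub). */
                double partial = params.a4 + params.a2 + params.a1;
                MPI_Send( &partial, 1, MPI_DOUBLE, ROOT, TAG_RESULT, comm );
            } else if( status.MPI_TAG == TAG_ADD ){
                /* In mw_addslave.cpp this branch receives a batch of serialized
                   expressions and returns their sum; omitted in this stub. */
            } else {
                break;   /* TAG_END: nothing left to do */
            }
        }
    }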

@@ -178,7 +174,7 @@ void multiply_1level_slave_addslave( tensor3D_t& T, matrix_int_t& J, unsigned in
             /* Receive a set of expressions to add */
 
             /* Number of expressions received */
-            int nb = params.a1;
+            int nb = params.a4;
 
             /* Length of each string */
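
The number of expressions to add is carried in the a4 field of an otherwise bogus parameters_t (see the matching change in utils_parall.cpp below). Once the nb strings have been received, the "addition on a slave" itself is just parsing and summing them; a minimal sketch, assuming the symbols occurring in the strings are known on the worker and using GiNaC's string-parsing ex constructor (the project may parse differently):

    #include <ginac/ginac.h>
    #include <string>
    #include <vector>

    namespace gi = GiNaC;

    // Sum a batch of serialized expressions (sketch). Assumes every symbol
    // used in the strings is listed in `symbols`.
    gi::ex add_expressions( const std::vector<std::string>& exprs, const gi::lst& symbols ) {
        gi::ex acc = 0;
        for( const std::string& s : exprs )
            acc += gi::ex( s, symbols );   /* parse the string back into an expression */
        return acc;
    }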

@@ -221,7 +217,7 @@ void multiply_1level_slave_addslave( tensor3D_t& T, matrix_int_t& J, unsigned in
    W -> M: send an unsigned int (size of the expression), then the expression (table of chars)
 */
 
-gi::ex multiply_1level_mw_addslave( tensor3D_t& T, matrix_int_t& J, int size ) {  // simpler: same dimension everywhere
+gi::ex multiply_1level_mw_addslave( tensor3D_t& T, int size ) {  // simpler: same dimension everywhere
     int rank;
     gi::ex Tens = 0;
     MPI_Comm_rank( MPI_COMM_WORLD, &rank );

@@ -233,9 +229,9 @@ gi::ex multiply_1level_mw_addslave( tensor3D_t& T, matrix_int_t& J, int size ) {
     /* Here we go */
 
     if( 0 == rank ) {
-        Tens = multiply_1level_master_addslave( T, J, size );
+        Tens = multiply_1level_master_addslave( T, size );
     } else {
-        multiply_1level_slave_addslave( T, J, size );
+        multiply_1level_slave_addslave( T, size );
    }
 
     /* Finalize */
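
The entry point keeps the usual shape: every rank calls the same function, rank 0 plays the master and the other ranks run the slave loop. The toy program below is a self-contained, runnable illustration of that rank split with a trivial payload; it is not the project's code.

    #include <mpi.h>
    #include <cstdio>

    // Minimal illustration of the rank-0-master / other-ranks-worker split.
    int main( int argc, char** argv ) {
        MPI_Init( &argc, &argv );
        int rank, np;
        MPI_Comm_rank( MPI_COMM_WORLD, &rank );
        MPI_Comm_size( MPI_COMM_WORLD, &np );

        const int TAG_WORK = 1;
        if( 0 == rank ) {
            /* Master: send one value to each worker, accumulate the replies. */
            double total = 0.0;
            for( int peer = 1 ; peer < np ; peer++ ){
                double x = peer;
                MPI_Send( &x, 1, MPI_DOUBLE, peer, TAG_WORK, MPI_COMM_WORLD );
            }
            for( int peer = 1 ; peer < np ; peer++ ){
                double y;
                MPI_Recv( &y, 1, MPI_DOUBLE, MPI_ANY_SOURCE, TAG_WORK, MPI_COMM_WORLD, MPI_STATUS_IGNORE );
                total += y;
            }
            std::printf( "total = %g\n", total );
        } else {
            /* Worker: receive, transform, send back. */
            double x;
            MPI_Recv( &x, 1, MPI_DOUBLE, 0, TAG_WORK, MPI_COMM_WORLD, MPI_STATUS_IGNORE );
            x *= 2.0;
            MPI_Send( &x, 1, MPI_DOUBLE, 0, TAG_WORK, MPI_COMM_WORLD );
        }

        MPI_Finalize();
        return 0;
    }

Run with, e.g., mpiexec -np 5 ./toy (one master, four workers), matching NP = 5 in the Makefile.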
src/tensormatrix.h

@@ -24,7 +24,7 @@ gi::ex multiply_1level( tensor3D_t&, matrix_int_t&, int );
 gi::ex multiply_2levels( tensor3D_t&, matrix_int_t&, int );
 
 // parallel
 gi::ex multiply_1level_mw( tensor3D_t&, int );
-gi::ex multiply_1level_mw_addslave( tensor3D_t&, matrix_int_t&, int );
+gi::ex multiply_1level_mw_addslave( tensor3D_t&, int );
 gi::ex multiply_1level_mw_hierarch( tensor3D_t&, matrix_int_t&, int );
 gi::ex multiply_combined( tensor3D_t&, matrix_int_t&, int );
src/tensormatrix_mpi.cpp

@@ -134,10 +134,10 @@ int main( int argc, char** argv ){
     case 'm':
         Tpara = multiply_1level_mw( T, N );
         break;
-        /*    case 'a':
-              Tpara = multiply_1level_mw_addslave( T, J, N );
+    case 'a':
+        Tpara = multiply_1level_mw_addslave( T, N );
         break;
-              case 'h':
+        /*    case 'h':
              Tpara = multiply_1level_mw_hierarch( T, J, N );
              break;
              case 'c':
src/utils_parall.cpp

@@ -98,7 +98,7 @@ void send_expressions_to_add( std::vector<std::string>& results, int peer ) {
     /* Fill a bogus parameter object */
     int nb = results.size();
     int i;
-    parameters_t p( 0, 0, 0 );
+    parameters_t p( nb, 0, 0 );
     char* expr;
 
     MPI_Send( &p, 1, DT_PARAMETERS, peer, TAG_ADD, MPI_COMM_WORLD );

@@ -115,7 +115,7 @@ void send_expressions_to_add( std::vector<std::string>& results, int peer ) {
         expr = const_cast<char*>( results[i].c_str() );
         MPI_Send( expr, results[i].length(), MPI_CHAR, peer, TAG_ADD, MPI_COMM_WORLD );
     }
     results.erase( results.begin(), results.end() );
 
     free( lengths );
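
send_expressions_to_add() now encodes the batch size in the first field of the bogus parameters_t (p( nb, 0, 0 )), then ships each expression as an MPI_CHAR message; an array of string lengths is also handled, as the free( lengths ) above indicates. As a hedged illustration, a receiving counterpart could instead size each incoming string with MPI_Probe / MPI_Get_count, which is a simplification rather than the project's exact protocol:

    #include <mpi.h>
    #include <string>
    #include <vector>

    // Possible receiving counterpart of send_expressions_to_add() (sketch only).
    // Sizes each string with MPI_Probe/MPI_Get_count instead of a length array.
    std::vector<std::string> recv_expressions_to_add( int nb, int peer, int tag, MPI_Comm comm ) {
        std::vector<std::string> exprs;
        exprs.reserve( nb );

        for( int i = 0 ; i < nb ; i++ ){
            MPI_Status status;
            int len = 0;
            MPI_Probe( peer, tag, comm, &status );        /* wait for the next string */
            MPI_Get_count( &status, MPI_CHAR, &len );     /* its length in chars */
            std::string buf( len, '\0' );
            MPI_Recv( &buf[0], len, MPI_CHAR, peer, tag, comm, MPI_STATUS_IGNORE );
            exprs.push_back( buf );
        }
        return exprs;
    }

Here nb would come from the bogus parameter object (params.a4 in mw_addslave.cpp).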