diff --git a/src/hierarchical.cpp b/src/hierarchical.cpp
index a912afb9ae6d65e1cae443194b3e0e5e69c1c32e..b5d86f313e8cdba80c789611c7e149f6b577d551 100644
--- a/src/hierarchical.cpp
+++ b/src/hierarchical.cpp
@@ -61,7 +61,7 @@ void create_communicators_hierarch( MPI_Comm& COMM_FOREMEN, MPI_Comm& COMM_TEAM
 
 }
 
-gi::ex multiply_1level_foreman_hierarch_distribute_work( tensor3D_t& T, matrix_int_t& J, int size, parameters_t params, gi::lst symbols, MPI_Comm comm_team ) {
+gi::ex multiply_1level_foreman_hierarch_distribute_work( tensor3D_t& T, matrix_int_t& J, int size, parameters_t params, gi::lst symbols, MPI_Comm comm_team, int rank_foreman /* DEBUG */ ) {
 
     gi::ex Tens = 0;
     gi::ex A;
@@ -149,7 +149,7 @@ gi::ex multiply_1level_foreman_hierarch_distribute_work( tensor3D_t& T, matrix_i
     running = np - 1; // all the slaves are running 
     while( running > 0 ) {
         /* Here we might also receive a TAG_PULL if the data set is too small */
-        MPI_Recv( &len, 1, MPI_UNSIGNED, MPI_ANY_SOURCE, MPI_ANY_TAG/* TAG_RES*/, comm_team, &status );
+        MPI_Recv( &len, 1, MPI_UNSIGNED, MPI_ANY_SOURCE, /* MPI_ANY_TAG */ TAG_RES, comm_team, &status );
         src = status.MPI_SOURCE;
 
         if( len != 0 ) {
@@ -178,7 +178,6 @@ gi::ex multiply_1level_foreman_hierarch_distribute_work( tensor3D_t& T, matrix_i
     }
     
 
-    
     if( NULL != expr_c) free( expr_c );
     return Tens;
 }
@@ -231,8 +230,8 @@ void multiply_1level_foreman_hierarch( tensor3D_t& T, matrix_int_t& J, int size,
         if( status.MPI_TAG == TAG_WORK ){
             
             /* Distribute the work on my workers */
-            
-           Tens = multiply_1level_foreman_hierarch_distribute_work( T, J, size, params, symbols, comm_team );
+
+            Tens = multiply_1level_foreman_hierarch_distribute_work( T, J, size, params, symbols, comm_team, rank /* DEBUG */ );
 
            /* Send the result to the master */