// NOTE(review): diff hunk — tail of the commrec-setup routine; its opening
// lines are outside this view. The old single gmx_setup() call (which both
// initialized MPI and filled cr) is replaced by the split
// gmx_do_mpi_init() + gmx_fill_commrec_from_mpi() pair.
{
gmx_comm("Initializing threads without comm");
}
- /* once threads will be used together with MPI, we'll
- fill the cr structure with distinct data here. This might even work: */
- cr->sim_nodeid = gmx_setup(0, NULL, &cr->nnodes);
- cr->mpi_comm_mysim = MPI_COMM_WORLD;
- cr->mpi_comm_mygroup = cr->mpi_comm_mysim;
- cr->nodeid = cr->sim_nodeid;
+ gmx_do_mpi_init(0, NULL);
+ gmx_fill_commrec_from_mpi(cr);
+
+ // TODO cr->duty should not be initialized here
cr->duty = (DUTY_PP | DUTY_PME);
return cr;
#endif
}
-int gmx_setup(int gmx_unused *argc, char gmx_unused ***argv, int *nnodes)
+void gmx_do_mpi_init(int gmx_unused *argc, char gmx_unused ***argv)
{
#ifndef GMX_MPI
- gmx_call("gmx_setup");
- return 0;
+ gmx_call("gmx_do_mpi_init");
+#else
+ if (!gmx_mpi_initialized())
+ {
+#ifdef GMX_LIB_MPI
+#ifdef GMX_FAHCORE
+ (void) fah_MPI_Init(argc, argv);
+#else
+ (void) MPI_Init(argc, argv);
+#endif
+#endif
+ }
+#endif
+}
+
// NOTE(review): new function carrying the commrec-filling half of the old
// gmx_setup(); it assumes MPI is already running (gmx_do_mpi_init() was
// called first). Context lines are elided inside this hunk — see notes below.
+void gmx_fill_commrec_from_mpi(t_commrec *cr)
+{
+#ifndef GMX_MPI
+ gmx_call("gmx_fill_commrec_from_mpi");
#else
char buf[256];
int resultlen; /* actual length of node name */
int mpi_my_rank;
char mpi_hostname[MPI_MAX_PROCESSOR_NAME];
// NOTE(review): buf and mpi_hostname are not referenced in the visible
// lines — presumably consumed by debug output elided from this hunk;
// confirm against the full file before removing them.
- /* Call the MPI routines */
-#ifdef GMX_LIB_MPI
-#ifdef GMX_FAHCORE
- (void) fah_MPI_Init(argc, argv);
-#else
- (void) MPI_Init(argc, argv);
-#endif
-#endif
- (void) MPI_Comm_size( MPI_COMM_WORLD, &mpi_num_nodes );
- (void) MPI_Comm_rank( MPI_COMM_WORLD, &mpi_my_rank );
+ mpi_num_nodes = gmx_node_num();
+ mpi_my_rank = gmx_node_rank();
(void) MPI_Get_processor_name( mpi_hostname, &resultlen );
// NOTE(review): unchanged context is elided here; the `}` below closes a
// block opened outside this hunk (inside the #ifdef GMX_LIB_MPI region).
#ifdef GMX_LIB_MPI
}
#endif
- *nnodes = mpi_num_nodes;
-
- return mpi_my_rank;
// The old out-parameter/return-value protocol of gmx_setup() becomes direct
// commrec assignment; both communicators are MPI_COMM_WORLD, i.e. one
// simulation spanning all ranks.
+ cr->nnodes = mpi_num_nodes;
+ cr->nodeid = mpi_my_rank;
+ cr->sim_nodeid = mpi_my_rank;
+ cr->mpi_comm_mysim = MPI_COMM_WORLD;
+ cr->mpi_comm_mygroup = MPI_COMM_WORLD;
#endif
}
}
// NOTE(review): diff hunk — renames gmx_finalize_par() to gmx_finalize_mpi()
// and replaces the hand-rolled MPI_Initialized() probe with the
// gmx_mpi_initialized() helper. The rest of the body (where `finalized` and
// `ret` are used) lies beyond this hunk and is not visible here.
-void gmx_finalize_par(void)
+void gmx_finalize_mpi(void)
{
#ifndef GMX_MPI
/* Compiled without MPI, no MPI finalizing needed */
return;
#else
- int initialized, finalized;
+ int finalized;
int ret;
- MPI_Initialized(&initialized);
- if (!initialized)
+ if (!gmx_mpi_initialized())
{
return;
}
extern "C" {
#endif
-int gmx_setup(int *argc, char ***argv, int *nnodes);
-/* Initializes the parallel communication, return the ID of the node */
+void gmx_do_mpi_init(int *argc, char ***argv);
+/* Initializes MPI (calls MPI_Init) if it is not yet initialized;
+ * harmless no-op when MPI is already running. */
+
+void gmx_fill_commrec_from_mpi(t_commrec *cr);
+/* Continues t_commrec construction: fills cr with the node count, rank IDs
+ * and MPI_COMM_WORLD communicators taken from the running MPI environment.
+ * Requires that MPI has already been initialized. */
int gmx_node_num(void);
/* return the number of nodes (MPI ranks) in the ring */
void gmx_abort(int nodeid, int nnodes, int errorno);
/* Abort the parallel run */
-void gmx_finalize_par(void);
+void gmx_finalize_mpi(void);
/* Finish the parallel run in an ordered manner, finalizing MPI if needed */
#ifdef GMX_DOUBLE
// in CommandLineModuleManager.
// NOTE(review): caller-side hunk — the zero-initialized t_commrec is now
// populated via the split gmx_do_mpi_init() + gmx_fill_commrec_from_mpi()
// pair instead of the old gmx_setup(); the enclosing function begins and
// ends outside this view.
t_commrec cr;
std::memset(&cr, 0, sizeof(cr));
- cr.nodeid = gmx_setup(argc, argv, &cr.nnodes);
- cr.mpi_comm_mygroup = MPI_COMM_WORLD;
+ gmx_do_mpi_init(argc, argv);
+ gmx_fill_commrec_from_mpi(&cr);
if (PAR(&cr))
{
broadcastArguments(&cr, argc, argv);
//! Tears down parallel communication at program exit; wraps gmx_finalize_mpi().
void finalize()
{
    gmx_finalize_mpi();
}
} // namespace gmx