/*
 * This source code is part of
 *
 * GROningen MAchine for Chemical Simulations
 *
 * VERSION 3.3.99_development_20071104
 * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2006, The GROMACS development team,
 * check out http://www.gromacs.org for more information.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * If you want to redistribute modifications, please consider that
 * scientific software is very special. Version control is crucial -
 * bugs must be traceable. We will be happy to consider code for
 * inclusion in the official distribution, but derived work must not
 * be called official GROMACS. Details are found in the README & COPYING
 * files - if they are missing, get the official version at www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the papers on the package - you can find them in the top README file.
 *
 * For more info, check our website at http://www.gromacs.org
 *
 * Groningen Machine for Chemical Simulation
 */
#include "gromacs/utility/smalloc.h"
#include "gromacs/fileio/tpxio.h"
#include "gromacs/commandline/pargs.h"
#include "gromacs/utility/futil.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxmpi.h"
/* qsort() comparator for the coordinate sort below: orders atom indices
 * by their x coordinate, looked up through the file-static xptr array.
 */
static int comp_xptr(const void *a,const void *b)
  if ((dx = (xptr[va][XX] - xptr[vb][XX])) < 0)
static void do_my_pme(FILE *fp,real tm,gmx_bool bVerbose,t_inputrec *ir,
                      rvec x[],rvec xbuf[],rvec f[],
                      real charge[],real qbuf[],real qqbuf[],
                      matrix box,gmx_bool bSort,
                      t_commrec *cr,t_nsborder *nsb,t_nrnb *nrnb,
                      t_block *excl,real qtot,
                      t_forcerec *fr,int index[],FILE *fp_xvg,
                      int ngroups,unsigned short cENER[])
{
  real   ener,vcorr,q,xx,dvdl=0,vdip,vcharge;
  tensor vir,vir_corr,vir_tot;
  /* Initialize local variables */
  fprintf(fp,"There are %d energy groups\n",ngroups);
  snew(epme,ngroups);
  for(i=0; (i<ngroups); i++)
    snew(epme[i],ngroups);

  /* Put x in the box; this part needs to be parallelized properly */
  /*put_atoms_in_box(box,nsb->natoms,x);*/
  /* Here sorting of X (and q) is done.
   * Alternatively, one could just put the atoms in one of the
   * cr->nnodes slabs. That is much cheaper than sorting.
   */
  for(i=0; (i<nsb->natoms); i++)
    index[i] = i;
  xptr = x;    /* comp_xptr() compares coordinates through xptr */
  qsort(index,nsb->natoms,sizeof(index[0]),comp_xptr);
  xptr = NULL; /* To trap unintentional use of the ptr */
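
  /* Added sketch (disabled): the cheaper slab-assignment alternative mentioned
   * in the comment above. Mapping each atom's x coordinate straight onto one
   * of the cr->nnodes slabs avoids the qsort; the use of box[XX][XX] as the
   * slab width reference and the clamping are illustrative assumptions only.
   */
#if 0
  for(i=0; (i<nsb->natoms); i++) {
    int slab = (int)(cr->nnodes*x[i][XX]/box[XX][XX]);

    if (slab < 0)
      slab = 0;
    else if (slab >= cr->nnodes)
      slab = cr->nnodes-1;
    /* atom i would then be handled by node 'slab' instead of being sorted */
  }
#endif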
  /* After sorting we only need the part that is to be computed on
   * this processor. We also compute the mu_tot here (system dipole).
   */
  clear_rvec(mu_tot[0]);
  for(i=START(nsb); (i<START(nsb)+HOMENR(nsb)); i++) {
    for(m=0; (m<DIM); m++) {
      mu_tot[0][m] += q*xx;

  copy_rvec(mu_tot[0],mu_tot[1]);

  pr_rvec(debug,0,"qbuf",qbuf,nsb->natoms,TRUE);
  pr_rvecs(debug,0,"xbuf",xbuf,nsb->natoms);
  pr_rvecs(debug,0,"box",box,DIM);
  for(ig=0; (ig<ngroups); ig++) {
    for(jg=ig; (jg<ngroups); jg++) {
      for(i=START(nsb); (i<START(nsb)+HOMENR(nsb)); i++) {
        if ((cENER[i] == ig) || (cENER[i] == jg))

      ener  = do_pme(fp,bVerbose,ir,xbuf,f,qptr,qptr,box,cr,
                     nsb,nrnb,vir,fr->ewaldcoeff,FALSE,0,&dvdl,FALSE);
      vcorr = ewald_LRcorrection(fp,nsb,cr,fr,qptr,qptr,excl,xbuf,box,mu_tot,
                                 ir->ewald_geometry,ir->epsilon_surface,
                                 0,&dvdl,&vdip,&vcharge);
      gmx_sum(1,&vcorr,cr);
      epme[ig][jg] = ener+vcorr;
  fprintf(fp_xvg,"%10.3f",tm);
  for(ig=0; (ig<ngroups); ig++) {
    for(jg=ig; (jg<ngroups); jg++) {
      /* The off-diagonal entries were computed with both groups charged;
       * subtracting the two diagonal (intra-group) terms leaves the
       * long-range interaction energy between groups ig and jg.
       */
      epme[ig][jg] -= epme[ig][ig]+epme[jg][jg];
      fprintf(fp_xvg," %12.5e",epme[ig][jg]);
  fprintf(fp_xvg,"\n");
  fprintf(fp,"Time: %10.3f Energy: %12.5e  Correction: %12.5e  Total: %12.5e\n",
          tm,ener,vcorr,ener+vcorr);
  fprintf(fp_xvg,"%10.3f %12.5e %12.5e %12.5e\n",tm,ener+vcorr,vdip,vcharge);

  m_add(vir,vir_corr,vir_tot);
  gmx_sum(9,vir_tot[0],cr);
  pr_rvecs(fp,0,"virial",vir_tot,DIM);
int main(int argc,char *argv[])
{
  static char *desc[] = {
    "The [TT]pmetest[tt] program tests the scaling of the PME code. When only given",
    "a [TT].tpr[tt] file it will compute PME for one frame. When given a trajectory",
    "it will do so for all the frames in the trajectory. Before the PME",
    "routine is called the coordinates are sorted along the X-axis.[PAR]",
    "As an extra service to the public the program can also compute",
    "long-range Coulomb energies for components of the system. When the",
    "[TT]-groups[tt] flag is given to the program the energy groups",
    "from the [TT].tpr[tt] file will be read, and half an energy matrix computed."
  };
  static t_filenm fnm[] = {
    { efTPX, NULL, NULL,       ffREAD  },
    { efTRN, "-o", NULL,       ffWRITE },
    { efLOG, "-g", "pme",      ffWRITE },
    { efTRX, "-f", NULL,       ffOPTRD },
    { efXVG, "-x", "ener-pme", ffWRITE }
  };
#define NFILE asize(fnm)
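
  /* Illustrative invocation (added; the file names and node count are just
   * examples):
   *   pmetest -s topol.tpr -f traj.trr -x ener-pme.xvg -groups -np 4
   */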
  /* Command line options */
  static gmx_bool bVerbose = FALSE;
  static gmx_bool bOptFFT  = FALSE;
  static gmx_bool bSort    = FALSE;
  static int      ewald_geometry = eewg3D;
  static int      nnodes    = 1;
  static int      pme_order = 0;
  static rvec     grid = { -1, -1, -1 };
  static real     rc   = 0.0;
  static real     dtol = 0.0;
  static gmx_bool bGroups = FALSE;
  static t_pargs pa[] = {
    { "-np",     FALSE, etINT,  {&nnodes},
      "Number of nodes, must be the same as used for [TT]grompp[tt]" },
    { "-v",      FALSE, etBOOL, {&bVerbose},
      "Be loud and noisy" },
    { "-sort",   FALSE, etBOOL, {&bSort},
      "Sort coordinates. Crucial for domain decomposition." },
    { "-grid",   FALSE, etRVEC, {&grid},
      "Number of grid cells in X, Y, Z dimension (if -1 use from [TT].tpr[tt])" },
    { "-order",  FALSE, etINT,  {&pme_order},
      "Order of the PME spreading algorithm" },
    { "-groups", FALSE, etBOOL, {&bGroups},
      "Compute half an energy matrix based on the energy groups in your [TT].tpr[tt] file" },
    { "-rc",     FALSE, etREAL, {&rc},
      "Rcoulomb for Ewald summation" },
    { "-tol",    FALSE, etREAL, {&dtol},
      "Tolerance for Ewald summation" }
  };
  int  natoms,step,status,i,ncg,root;
  real t,lambda,ewaldcoeff,qtot;
  real *charge,*qbuf,*qqbuf;

  /* Start the actual parallel code if necessary */
  cr = init_par(&argc,&argv);

  CopyRight(stderr,argv[0]);
  /* Parse command line on all processors; the arguments are passed on in
   * init_par (see above).
   */
  parse_common_args(&argc,argv,
                    PCA_NOEXIT_ON_ARGS | PCA_BE_NICE |
                    PCA_CAN_SET_DEFFNM | (MASTER(cr) ? 0 : PCA_QUIET),
                    NFILE,fnm,asize(pa),pa,asize(desc),desc,0,NULL);

    gmx_fatal(FARGS,"GROMACS compiled without MPI support - can't do parallel runs");
  /* Open log files on all processors */
  open_log(ftp2fn(efLOG,NFILE,fnm),cr);

  /* Read tpr file etc. */
  read_tpxheader(ftp2fn(efTPX,NFILE,fnm),&tpx,FALSE,NULL,NULL);
  read_tpx(ftp2fn(efTPX,NFILE,fnm),&step,&t,&lambda,ir,
           box,&natoms,x,NULL,NULL,&top);
  for(i=0; (i<natoms); i++) {
    charge[i] = top.atoms.atom[i].q;

  if (opt2parg_bSet("-grid",asize(pa),pa)) {

  /* Check command line parameters for consistency */
  if ((ir->nkx <= 0) || (ir->nky <= 0) || (ir->nkz <= 0))
    gmx_fatal(FARGS,"PME grid = %d %d %d",ir->nkx,ir->nky,ir->nkz);
  if (opt2parg_bSet("-rc",asize(pa),pa))
    ir->rcoulomb = rc;
  if (ir->rcoulomb <= 0)
    gmx_fatal(FARGS,"rcoulomb should be > 0 (not %f)",ir->rcoulomb);
  if (opt2parg_bSet("-order",asize(pa),pa))
    ir->pme_order = pme_order;
  if (ir->pme_order <= 0)
    gmx_fatal(FARGS,"pme_order should be > 0 (not %d)",ir->pme_order);
  if (opt2parg_bSet("-tol",asize(pa),pa))
    ir->ewald_rtol = dtol;
  if (ir->ewald_rtol <= 0)
    gmx_fatal(FARGS,"ewald_tol should be > 0 (not %f)",ir->ewald_rtol);
  /* Add parallelization code here */
  ncg = top.blocks[ebCGS].multinr[0];
  for(i=0; (i<cr->nnodes-1); i++)
    top.blocks[ebCGS].multinr[i] = min(ncg,(ncg*(i+1))/cr->nnodes);
  for( ; (i<MAXNODES); i++)
    top.blocks[ebCGS].multinr[i] = ncg;
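  /* Added worked example: with ncg = 10 charge groups and cr->nnodes = 4 the
   * loops above yield multinr = 2, 5, 7 for the first three entries and 10 for
   * the rest, i.e. cumulative end indices that give the four nodes 2, 3, 2 and
   * 3 charge groups respectively.
   */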
  /* Set some variables to zero to avoid core dumps */
  ir->opts.ngtc = ir->opts.ngacc = ir->opts.ngfrz = ir->opts.ngener = 0;

  /* Distribute the data over processors */
  MPI_Bcast(&natoms,1,MPI_INT,root,MPI_COMM_WORLD);
  MPI_Bcast(ir,sizeof(*ir),MPI_BYTE,root,MPI_COMM_WORLD);
  MPI_Bcast(&qtot,1,GMX_MPI_REAL,root,MPI_COMM_WORLD);
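  /* Added note: the t_inputrec is broadcast as raw bytes, so any pointer
   * members it contains arrive as dangling pointers on the non-master nodes;
   * zeroing the group counts above presumably keeps those pointers from ever
   * being dereferenced in this test.
   */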
  /* Call some dedicated communication routines; the master sends n-1 times */
  for(i=1; (i<cr->nnodes); i++) {
    mv_block(i,&(top.blocks[ebCGS]));
    mv_block(i,&(top.atoms.excl));

  ld_block(root,&(top.blocks[ebCGS]));
  ld_block(root,&(top.atoms.excl));

  MPI_Bcast(charge,natoms,GMX_MPI_REAL,root,MPI_COMM_WORLD);
  ewaldcoeff = calc_ewaldcoeff(ir->rcoulomb,ir->ewald_rtol);
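
  /* Added note: calc_ewaldcoeff() derives the Ewald splitting coefficient from
   * rcoulomb and ewald_rtol; roughly speaking, it looks for the coefficient
   * beta at which erfc(beta*rc) has dropped to the requested tolerance. The
   * disabled fragment below only sketches that idea (plain math.h erfc(), a
   * fixed search bracket of [0,10]); it is not the actual GROMACS routine.
   */
#if 0
  {
    double lo = 0, hi = 10, beta = 0;
    int    it;

    for(it=0; (it<50); it++) {
      beta = 0.5*(lo+hi);
      if (erfc(beta*ir->rcoulomb) > ir->ewald_rtol)
        lo = beta;  /* real-space tail still too large: increase beta */
      else
        hi = beta;
    }
    /* beta now approximates the value stored in ewaldcoeff above */
  }
#endif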
  pr_inputrec(stdlog,0,"Inputrec",ir);

  /* Allocate memory for temp arrays etc. */

  /* Initialize the PME code */
  init_pme(stdlog,cr,ir->nkx,ir->nky,ir->nkz,ir->pme_order,
           natoms,FALSE,bOptFFT,ewald_geometry);

  /* MFlops accounting */

  /* Initialize the work division */
  calc_nsb(stdlog,&(top.blocks[ebCGS]),cr->nnodes,nsb,0);
  nsb->nodeid = cr->nodeid;
  print_nsb(stdlog,"pmetest",nsb);
  /* Initialize the forcerec */
  mdatoms = atoms2md(stdlog,&top.atoms,ir->opts.nFreeze,ir->eI,
                     ir->delta_t,0,ir->opts.tau_t,FALSE,FALSE);

  init_forcerec(stdlog,fr,ir,&top,cr,mdatoms,nsb,box,FALSE,NULL,NULL,FALSE);
  /* First do PME based on the coordinates in the tpr file; send them to
   * the other processors if needed.
   */
  fprintf(stdlog,"-----\n"
          "Results based on tpr file %s\n",ftp2fn(efTPX,NFILE,fnm));

  MPI_Bcast(x[0],natoms*DIM,GMX_MPI_REAL,root,MPI_COMM_WORLD);
  MPI_Bcast(box[0],DIM*DIM,GMX_MPI_REAL,root,MPI_COMM_WORLD);
  MPI_Bcast(&t,1,GMX_MPI_REAL,root,MPI_COMM_WORLD);

  do_my_pme(stdlog,0,bVerbose,ir,x,xbuf,f,charge,qbuf,qqbuf,box,bSort,
            cr,nsb,&nrnb,&(top.atoms.excl),qtot,fr,index,NULL,
            bGroups ? ir->opts.ngener : 1,mdatoms->cENER);
  /* If we have a trajectory file, we will read the frames in it and compute
   * the PME energy for each of them.
   */
  if (ftp2bSet(efTRX,NFILE,fnm)) {
    fprintf(stdlog,"-----\n"
            "Results based on trx file %s\n",ftp2fn(efTRX,NFILE,fnm));

    natoms = read_first_x(&status,ftp2fn(efTRX,NFILE,fnm),&t,&x,box);
    if (natoms != top.atoms.nr)
      gmx_fatal(FARGS,"natoms in trx = %d, in tpr = %d",natoms,top.atoms.nr);
    fp = xvgropen(ftp2fn(efXVG,NFILE,fnm),"PME Energy","Time (ps)","E (kJ/mol)");

      /* Send coordinates, box and time to the other nodes */
      MPI_Bcast(x[0],natoms*DIM,GMX_MPI_REAL,root,MPI_COMM_WORLD);
      MPI_Bcast(box[0],DIM*DIM,GMX_MPI_REAL,root,MPI_COMM_WORLD);
      MPI_Bcast(&t,1,GMX_MPI_REAL,root,MPI_COMM_WORLD);

      rm_pbc(&top.idef,nsb->natoms,box,x,x);

      /* Call the PME wrapper function */
      do_my_pme(stdlog,t,bVerbose,ir,x,xbuf,f,charge,qbuf,qqbuf,box,bSort,cr,
                nsb,&nrnb,&(top.atoms.excl),qtot,fr,index,fp,
                bGroups ? ir->opts.ngener : 1,mdatoms->cENER);

      /* Only the master processor reads more data */
      bCont = read_next_x(status,&t,natoms,x,box);

      /* Check whether we need to continue */
      MPI_Bcast(&bCont,1,MPI_INT,root,MPI_COMM_WORLD);
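      /* Added note: broadcasting bCont keeps every node's frame loop in
       * lockstep with the master, so all nodes leave the loop on the same
       * frame once read_next_x() runs out of data.
       */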
  /* Finish I/O, close files */

  /* Print some final performance statistics; may be useful for debugging */
  fprintf(stdlog,"-----\n");
  print_nrnb(stdlog,&nrnb);

  /* Finish the parallel stuff */
  if (gmx_parallel_env_initialized())

  /* Thank the audience, as usual */