/*
 * This source code is part of
 *
 * GROningen MAchine for Chemical Simulations
 *
 * VERSION 3.3.99_development_20071104
 * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2006, The GROMACS development team,
 * check out http://www.gromacs.org for more information.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * If you want to redistribute modifications, please consider that
 * scientific software is very special. Version control is crucial -
 * bugs must be traceable. We will be happy to consider code for
 * inclusion in the official distribution, but derived work must not
 * be called official GROMACS. Details are found in the README & COPYING
 * files - if they are missing, get the official version at www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the papers on the package - you can find them in the top README file.
 *
 * For more info, check our website at http://www.gromacs.org
 *
 * And Hey:
 * Groningen Machine for Chemical Simulation
 */
#include "gmx_fatal.h"
static rvec *xptr=NULL;

static int comp_xptr(const void *a,const void *b)
{
  int  va = *(int *)a,vb = *(int *)b;
  real dx;

  if ((dx = (xptr[va][XX] - xptr[vb][XX])) < 0)
    return -1;
  else
    return (dx > 0);
}
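
/* Illustrative sketch (not part of the original tool): how an index
 * array is typically driven through comp_xptr. The global xptr is set
 * for the duration of the qsort call and cleared again afterwards so
 * that stale use is caught. The function name and calling convention
 * here are assumptions, added for clarity only.
 */
static void sort_index_by_x(rvec x[],int index[],int natoms)
{
  int i;

  for(i=0; (i<natoms); i++)
    index[i] = i;            /* start from the identity permutation */
  xptr = x;                  /* the comparator reads coordinates via xptr */
  qsort(index,natoms,sizeof(index[0]),comp_xptr);
  xptr = NULL;               /* trap unintentional use of the pointer */
}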
static void do_my_pme(FILE *fp,real tm,gmx_bool bVerbose,t_inputrec *ir,
                      rvec x[],rvec xbuf[],rvec f[],
                      real charge[],real qbuf[],real qqbuf[],
                      matrix box,gmx_bool bSort,
                      t_commrec *cr,t_nsborder *nsb,t_nrnb *nrnb,
                      t_block *excl,real qtot,
                      t_forcerec *fr,int index[],FILE *fp_xvg,
                      int ngroups,unsigned short cENER[])
{
  real   ener,vcorr,q,xx,dvdl=0,vdip,vcharge;
  tensor vir,vir_corr,vir_tot;
  /* Initialize local variables */
  fprintf(fp,"There are %d energy groups\n",ngroups);
  snew(epme,ngroups);
  for(i=0; (i<ngroups); i++)
    snew(epme[i],ngroups);
  /* Put x in the box; this part needs to be properly parallelized */
  /*put_atoms_in_box(box,nsb->natoms,x);*/
  /* Here the sorting of x (and q) is done.
   * Alternatively, one could just put the atoms in one of the
   * cr->nnodes slabs. That is much cheaper than sorting.
   */
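  /* For reference, the slab alternative mentioned above would be an O(N)
   * binning instead of an O(N log N) sort, e.g. (illustrative only,
   * assuming a rectangular box):
   *   slab = (int)((cr->nnodes*x[i][XX])/box[XX][XX]);
   */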
  for(i=0; (i<nsb->natoms); i++)
    index[i] = i;            /* start from the identity permutation */
  if (bSort) {
    xptr = x;
    qsort(index,nsb->natoms,sizeof(index[0]),comp_xptr);
    xptr = NULL; /* To trap unintentional use of the pointer */
  }
  /* After sorting we only need the part that is to be computed on
   * this processor. We also compute mu_tot (the system dipole) here.
   */
  clear_rvec(mu_tot[0]);
  for(i=START(nsb); (i<START(nsb)+HOMENR(nsb)); i++) {
    q       = charge[index[i]];
    qbuf[i] = q;
    for(m=0; (m<DIM); m++) {
      xx            = x[index[i]][m];
      xbuf[i][m]    = xx;
      mu_tot[0][m] += q*xx;
    }
  }
  copy_rvec(mu_tot[0],mu_tot[1]);
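  /* mu_tot[0] now holds the dipole of the home atoms, mu = sum_i q_i x_i.
   * The copy into mu_tot[1] supplies the dipole of the second (B)
   * free-energy state, which is identical here; that reading is an
   * assumption based on how ewald_LRcorrection is called below.
   */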
  pr_rvec(debug,0,"qbuf",qbuf,nsb->natoms,TRUE);
  pr_rvecs(debug,0,"xbuf",xbuf,nsb->natoms);
  pr_rvecs(debug,0,"box",box,DIM);
  for(ig=0; (ig<ngroups); ig++) {
    for(jg=ig; (jg<ngroups); jg++) {
      if (ngroups > 1) {
        /* Mask out the charges of all atoms outside groups ig and jg */
        for(i=START(nsb); (i<START(nsb)+HOMENR(nsb)); i++) {
          if ((cENER[i] == ig) || (cENER[i] == jg))
            qqbuf[i] = qbuf[i];
          else
            qqbuf[i] = 0;
        }
        qptr = qqbuf;
      }
      else
        qptr = qbuf;
      ener  = do_pme(fp,bVerbose,ir,xbuf,f,qptr,qptr,box,cr,
                     nsb,nrnb,vir,fr->ewaldcoeff,FALSE,0,&dvdl,FALSE);
      vcorr = ewald_LRcorrection(fp,nsb,cr,fr,qptr,qptr,excl,xbuf,box,mu_tot,
                                 ir->ewald_geometry,ir->epsilon_surface,
                                 0,&dvdl,&vdip,&vcharge);
      gmx_sum(1,&vcorr,cr);
      epme[ig][jg] = ener+vcorr;
    }
  }
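  /* Convert the pair energies into interaction energies:
   * E_int(ig,jg) = E(ig+jg) - E(ig) - E(jg),
   * i.e. subtract the intra-group terms from the energy of the union.
   * A standalone sketch of this decomposition follows the function.
   */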
  if ((ngroups > 1) && fp_xvg) {
    fprintf(fp_xvg,"%10.3f",tm);
    for(ig=0; (ig<ngroups); ig++) {
      for(jg=ig; (jg<ngroups); jg++) {
        if (ig != jg)
          epme[ig][jg] -= epme[ig][ig]+epme[jg][jg];
        fprintf(fp_xvg," %12.5e",epme[ig][jg]);
      }
    }
    fprintf(fp_xvg,"\n");
  }
  else if (ngroups == 1) {
    fprintf(fp,"Time: %10.3f Energy: %12.5e Correction: %12.5e Total: %12.5e\n",
            tm,ener,vcorr,ener+vcorr);
    if (fp_xvg)
      fprintf(fp_xvg,"%10.3f %12.5e %12.5e %12.5e\n",tm,ener+vcorr,vdip,vcharge);
  }
  m_add(vir,vir_corr,vir_tot);
  gmx_sum(9,vir_tot[0],cr);
  pr_rvecs(fp,0,"virial",vir_tot,DIM);
}
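
/* Illustrative sketch (not part of the original tool): the group-pair
 * decomposition used above, on a toy two-group case. Given subset
 * energies e[i][j] = E(group i + group j), the interaction energy of two
 * different groups is e[i][j] - e[i][i] - e[j][j]. All names here are
 * hypothetical; assumes <stdio.h> is available via the headers above.
 */
static void demo_group_decomposition(void)
{
  double e[2][2];
  double eint;

  e[0][0] = 1.0;  /* E(0):   intra-group energy of group 0 */
  e[1][1] = 3.0;  /* E(1):   intra-group energy of group 1 */
  e[0][1] = 5.0;  /* E(0+1): energy of both groups together */

  eint = e[0][1] - e[0][0] - e[1][1];  /* = 5 - 1 - 3 = 1 */
  printf("Interaction energy between groups 0 and 1: %g\n",eint);
}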
int main(int argc,char *argv[])
{
  static char *desc[] = {
    "The [TT]pmetest[tt] program tests the scaling of the PME code. When only given",
    "a [TT].tpr[tt] file it will compute PME for one frame. When given a trajectory",
    "it will do so for all the frames in the trajectory. Before the PME",
    "routine is called the coordinates are sorted along the X-axis.[PAR]",
    "As an extra service to the public the program can also compute",
    "long-range Coulomb energies for components of the system. When the",
    "[TT]-groups[tt] flag is given to the program the energy groups",
    "from the [TT].tpr[tt] file will be read, and half an energy matrix computed."
  };
  static t_filenm fnm[] = {
    { efTPX, NULL, NULL,       ffREAD  },
    { efTRN, "-o", NULL,       ffWRITE },
    { efLOG, "-g", "pme",      ffWRITE },
    { efTRX, "-f", NULL,       ffOPTRD },
    { efXVG, "-x", "ener-pme", ffWRITE }
  };
#define NFILE asize(fnm)
  /* Command line options */
  static gmx_bool bVerbose=FALSE;
  static gmx_bool bOptFFT=FALSE;
  static gmx_bool bSort=FALSE;
  static int      ewald_geometry=eewg3D;
  static int      nnodes=1;
  static int      pme_order=0;
  static rvec     grid = { -1, -1, -1 };
  static real     rc   = 0.0;
  static real     dtol = 0.0;
  static gmx_bool bGroups = FALSE;
  static t_pargs pa[] = {
    { "-np",     FALSE, etINT, {&nnodes},
      "Number of nodes, must be the same as used for [TT]grompp[tt]" },
    { "-v",      FALSE, etBOOL,{&bVerbose},
      "Be loud and noisy" },
    { "-sort",   FALSE, etBOOL,{&bSort},
      "Sort coordinates. Crucial for domain decomposition." },
    { "-grid",   FALSE, etRVEC,{&grid},
      "Number of grid cells in X, Y, Z dimension (if -1, use the values from the [TT].tpr[tt] file)" },
    { "-order",  FALSE, etINT, {&pme_order},
      "Order of the PME spreading algorithm" },
    { "-groups", FALSE, etBOOL, {&bGroups},
      "Compute half an energy matrix based on the energy groups in your [TT].tpr[tt] file" },
    { "-rc",     FALSE, etREAL, {&rc},
      "Rcoulomb for Ewald summation" },
    { "-tol",    FALSE, etREAL, {&dtol},
      "Tolerance for Ewald summation" }
  };
  int  natoms,step,status,i,ncg,root;
  real t,lambda,ewaldcoeff,qtot;
  real *charge,*qbuf,*qqbuf;

  /* Start the actual parallel code if necessary */
  cr = init_par(&argc,&argv);

  if (MASTER(cr))
    CopyRight(stderr,argv[0]);
  /* Parse the command line on all processors; arguments are passed on
   * in init_par (see above).
   */
  parse_common_args(&argc,argv,
                    PCA_KEEP_ARGS | PCA_NOEXIT_ON_ARGS | PCA_BE_NICE |
                    PCA_CAN_SET_DEFFNM | (MASTER(cr) ? 0 : PCA_QUIET),
                    NFILE,fnm,asize(pa),pa,asize(desc),desc,0,NULL);
#ifndef GMX_MPI
  if (nnodes > 1)
    gmx_fatal(FARGS,"GROMACS compiled without MPI support - can't do parallel runs");
#endif
  /* Open log files on all processors */
  open_log(ftp2fn(efLOG,NFILE,fnm),cr);
  /* Read tpr file etc. */
  read_tpxheader(ftp2fn(efTPX,NFILE,fnm),&tpx,FALSE,NULL,NULL);
  read_tpx(ftp2fn(efTPX,NFILE,fnm),&step,&t,&lambda,ir,
           box,&natoms,x,NULL,NULL,&top);
  /* Collect the charges and the total charge of the system */
  snew(charge,natoms);
  qtot = 0;
  for(i=0; (i<natoms); i++) {
    charge[i] = top.atoms.atom[i].q;
    qtot     += charge[i];
  }
  /* Override the grid dimensions from the tpr file if -grid was given */
  if (opt2parg_bSet("-grid",asize(pa),pa)) {
    ir->nkx = grid[XX];
    ir->nky = grid[YY];
    ir->nkz = grid[ZZ];
  }
  /* Check command line parameters for consistency */
  if ((ir->nkx <= 0) || (ir->nky <= 0) || (ir->nkz <= 0))
    gmx_fatal(FARGS,"PME grid = %d %d %d",ir->nkx,ir->nky,ir->nkz);
  if (opt2parg_bSet("-rc",asize(pa),pa))
    ir->rcoulomb = rc;
  if (ir->rcoulomb <= 0)
    gmx_fatal(FARGS,"rcoulomb should be > 0 (not %f)",ir->rcoulomb);
  if (opt2parg_bSet("-order",asize(pa),pa))
    ir->pme_order = pme_order;
  if (ir->pme_order <= 0)
    gmx_fatal(FARGS,"pme_order should be > 0 (not %d)",ir->pme_order);
  if (opt2parg_bSet("-tol",asize(pa),pa))
    ir->ewald_rtol = dtol;
  if (ir->ewald_rtol <= 0)
    gmx_fatal(FARGS,"ewald_tol should be > 0 (not %f)",ir->ewald_rtol);
  /* Add parallelization code here */
  /* Divide the charge groups evenly over the nodes: node i owns the
   * range up to min(ncg,ncg*(i+1)/nnodes).
   */
  ncg = top.blocks[ebCGS].multinr[0];
  for(i=0; (i<cr->nnodes-1); i++)
    top.blocks[ebCGS].multinr[i] = min(ncg,(ncg*(i+1))/cr->nnodes);
  for( ; (i<MAXNODES); i++)
    top.blocks[ebCGS].multinr[i] = ncg;
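  /* Worked example of the division above: with ncg = 10 charge groups and
   * cr->nnodes = 4, multinr becomes {2,5,7,10,...}, so the four nodes own
   * 2, 3, 2 and 3 charge groups respectively; the remaining MAXNODES
   * entries are padded with ncg.
   */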
  /* Set some variables to zero to avoid core dumps */
  ir->opts.ngtc = ir->opts.ngacc = ir->opts.ngfrz = ir->opts.ngener = 0;

  /* Distribute the data over processors */
  root = 0; /* the master node */
  MPI_Bcast(&natoms,1,MPI_INT,root,MPI_COMM_WORLD);
  MPI_Bcast(ir,sizeof(*ir),MPI_BYTE,root,MPI_COMM_WORLD);
  MPI_Bcast(&qtot,1,GMX_MPI_REAL,root,MPI_COMM_WORLD);
  /* Call some dedicated communication routines; the master sends to the
   * n-1 others, who each load what was sent.
   */
  if (MASTER(cr)) {
    for(i=1; (i<cr->nnodes); i++) {
      mv_block(i,&(top.blocks[ebCGS]));
      mv_block(i,&(top.atoms.excl));
    }
  }
  else {
    ld_block(root,&(top.blocks[ebCGS]));
    ld_block(root,&(top.atoms.excl));
  }
  MPI_Bcast(charge,natoms,GMX_MPI_REAL,root,MPI_COMM_WORLD);
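  /* calc_ewaldcoeff picks the Ewald splitting parameter beta such that
   * the real-space part has decayed to about ewald_rtol at the cut-off,
   * i.e. erfc(beta*rcoulomb) ~ ewald_rtol.
   */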
  ewaldcoeff = calc_ewaldcoeff(ir->rcoulomb,ir->ewald_rtol);
  pr_inputrec(stdlog,0,"Inputrec",ir);

  /* Allocate memory for temp arrays etc. */

  /* Initialize the PME code */
  init_pme(stdlog,cr,ir->nkx,ir->nky,ir->nkz,ir->pme_order,
           natoms,FALSE,bOptFFT,ewald_geometry);
  /* MFlops accounting */
  init_nrnb(&nrnb);

  /* Initialize the work division */
  calc_nsb(stdlog,&(top.blocks[ebCGS]),cr->nnodes,nsb,0);
  nsb->nodeid = cr->nodeid;
  print_nsb(stdlog,"pmetest",nsb);
  /* Initialize the forcerec */
  mdatoms = atoms2md(stdlog,&top.atoms,ir->opts.nFreeze,ir->eI,
                     ir->delta_t,0,ir->opts.tau_t,FALSE,FALSE);
  snew(fr,1);
  init_forcerec(stdlog,fr,ir,&top,cr,mdatoms,nsb,box,FALSE,NULL,NULL,FALSE);
  /* First do PME based on the coordinates in the tpr file; send them
   * to the other processors if needed.
   */
  fprintf(stdlog,"-----\n"
          "Results based on tpr file %s\n",ftp2fn(efTPX,NFILE,fnm));
  if (PAR(cr)) {
    MPI_Bcast(x[0],natoms*DIM,GMX_MPI_REAL,root,MPI_COMM_WORLD);
    MPI_Bcast(box[0],DIM*DIM,GMX_MPI_REAL,root,MPI_COMM_WORLD);
    MPI_Bcast(&t,1,GMX_MPI_REAL,root,MPI_COMM_WORLD);
  }
  do_my_pme(stdlog,0,bVerbose,ir,x,xbuf,f,charge,qbuf,qqbuf,box,bSort,
            cr,nsb,&nrnb,&(top.atoms.excl),qtot,fr,index,NULL,
            bGroups ? ir->opts.ngener : 1,mdatoms->cENER);
  /* If we have a trajectory file, read the frames in it and compute
   * the PME energy for each.
   */
  if (ftp2bSet(efTRX,NFILE,fnm)) {
    fprintf(stdlog,"-----\n"
            "Results based on trx file %s\n",ftp2fn(efTRX,NFILE,fnm));
    natoms = read_first_x(&status,ftp2fn(efTRX,NFILE,fnm),&t,&x,box);
    if (natoms != top.atoms.nr)
      gmx_fatal(FARGS,"natoms in trx = %d, in tpr = %d",natoms,top.atoms.nr);
    fp = xvgropen(ftp2fn(efXVG,NFILE,fnm),"PME Energy","Time (ps)","E (kJ/mol)");
    do {
      /* Send coordinates, box and time to the other nodes */
      if (PAR(cr)) {
        MPI_Bcast(x[0],natoms*DIM,GMX_MPI_REAL,root,MPI_COMM_WORLD);
        MPI_Bcast(box[0],DIM*DIM,GMX_MPI_REAL,root,MPI_COMM_WORLD);
        MPI_Bcast(&t,1,GMX_MPI_REAL,root,MPI_COMM_WORLD);
      }
      rm_pbc(&top.idef,nsb->natoms,box,x,x);
      /* Call the PME wrapper function */
      do_my_pme(stdlog,t,bVerbose,ir,x,xbuf,f,charge,qbuf,qqbuf,box,bSort,cr,
                nsb,&nrnb,&(top.atoms.excl),qtot,fr,index,fp,
                bGroups ? ir->opts.ngener : 1,mdatoms->cENER);
      /* Only the master processor reads more data */
      if (MASTER(cr))
        bCont = read_next_x(status,&t,natoms,x,box);
      /* Broadcast the flag so that all nodes leave the loop together */
      if (PAR(cr))
        MPI_Bcast(&bCont,1,MPI_INT,root,MPI_COMM_WORLD);
    } while (bCont);
  }
  /* Finish I/O, close files */

  /* Print some final performance statistics; may be useful for debugging */
  fprintf(stdlog,"-----\n");
  print_nrnb(stdlog,&nrnb);
  /* Finish the parallel stuff */
  if (gmx_parallel_env_initialized())
    gmx_finalize(cr);
  /* Thank the audience, as usual */
  if (MASTER(cr))
    thanx(stderr);

  return 0;
}