From: Roland Schulz
Date: Sun, 29 Jun 2014 07:32:16 +0000 (-0400)
Subject: Merge release-4-6 into release-5-0
X-Git-Url: http://biod.pnpi.spb.ru/gitweb/?a=commitdiff_plain;h=46afc2960825a4dd45176ba86101cafe720e5564;p=alexxy%2Fgromacs.git

Merge release-4-6 into release-5-0

Change-Id: I89230f8a117d9636570e573b7b2657c79aea51c0
---

46afc2960825a4dd45176ba86101cafe720e5564
diff --cc src/external/thread_mpi/include/thread_mpi/atomic.h
index 5aa81630f6,0000000000..895db00ff4
mode 100644,000000..100644
--- a/src/external/thread_mpi/include/thread_mpi/atomic.h
+++ b/src/external/thread_mpi/include/thread_mpi/atomic.h
@@@ -1,643 -1,0 +1,643 @@@
+/*
+   This source code file is part of thread_mpi.
+   Written by Sander Pronk, Erik Lindahl, and possibly others.
+
+   Copyright (c) 2009, Sander Pronk, Erik Lindahl.
+   All rights reserved.
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are met:
+   1) Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+   2) Redistributions in binary form must reproduce the above copyright
+   notice, this list of conditions and the following disclaimer in the
+   documentation and/or other materials provided with the distribution.
+   3) Neither the name of the copyright holders nor the
+   names of its contributors may be used to endorse or promote products
+   derived from this software without specific prior written permission.
+
+   THIS SOFTWARE IS PROVIDED BY US ''AS IS'' AND ANY
+   EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+   WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+   DISCLAIMED. IN NO EVENT SHALL WE BE LIABLE FOR ANY
+   DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+   (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+   LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+   ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+   If you want to redistribute modifications, please consider that
+   scientific software is very special. Version control is crucial -
+   bugs must be traceable. We will be happy to consider code for
+   inclusion in the official distribution, but derived work should not
+   be called official thread_mpi. Details are found in the README & COPYING
+   files.
+ */
+
+#ifndef TMPI_ATOMIC_H_
+#define TMPI_ATOMIC_H_
+
+/*! \file atomic.h
+ *
+ * \brief Atomic operations for fast SMP synchronization
+ *
+ * This file defines atomic integer operations and spinlocks for
+ * fast synchronization in performance-critical regions.
+ *
+ * In general, the best option is to use functions without explicit
+ * locking, e.g. tMPI_Atomic_fetch_add() or tMPI_Atomic_cas().
+ *
+ * Depending on the architecture/compiler, these operations may either
+ * be provided as functions or macros; be aware that those macros may
+ * reference their arguments repeatedly, possibly leading to multiply
+ * evaluated code with side effects: be careful with what you use as
+ * arguments.
+ *
+ * Not all architectures support atomic operations through inline assembly,
+ * and even if they do it might not be implemented here. In that case
+ * we use a fallback mutex implementation, so you can always count on
+ * the function interfaces working.
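+ * (For example, with a macro implementation a call like
+ * tMPI_Atomic_add_return(a, i++) could evaluate i++ more than once,
+ * so pass only side-effect-free expressions as arguments.)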
+ *
+ * Don't use spinlocks in non-performance-critical regions like file I/O.
+ * Because they always busy-wait, they would waste CPU cycles instead of
+ * properly yielding to a computation thread while waiting for the disk.
+ *
+ * Finally, note that all our spinlock operations are defined to return
+ * 0 if initialization or locking completes successfully.
+ * This is the opposite of some other implementations, but the same standard
+ * as used for pthread mutexes. So if you are, e.g., trying to lock a spinlock,
+ * you have acquired the lock if the return value is 0.
+ *
+ * tMPI_Spinlock_islocked(x) obviously still returns 1 if the lock is locked,
+ * and 0 if it is available, though...
+ */
+/* See the comments on the non-atomic versions for explanations */
+
+#include
+
+#include "visibility.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+#if 0
+} /* Avoids screwing up auto-indentation */
+#endif
+
+/* Setting TMPI_ATOMICS_DISABLED permits the build to enforce that no
+ * atomic operations are used. This is used when building to run
+ * ThreadSanitizer.
+ *
+ * It could also be useful as a temporary measure on some
+ * compiler+hardware for which the detection below fails to produce a
+ * correct result. Performance will be greatly improved by using
+ * whatever atomic operations are available, so make sure such a
+ * measure is only temporary! */
+#ifdef TMPI_ATOMICS_DISABLED
+
+#ifndef DOXYGEN
+#define TMPI_NO_ATOMICS
+#endif
+
+#else
+
+/* first check for gcc/icc platforms.
+   Some compatible compilers, like icc on linux+mac, will take this path
+   too */
+#if ( (defined(__GNUC__) || defined(__PATHSCALE__) || defined(__PGI)) && \
+    (!defined(__xlc__)) && (!defined(_CRAYC)) && (!defined(TMPI_TEST_NO_ATOMICS)) )
+
+#ifdef __GNUC__
+#define TMPI_GCC_VERSION (__GNUC__ * 10000 \
+                          + __GNUC_MINOR__ * 100 \
+                          + __GNUC_PATCHLEVEL__)
+#endif
+
+/* now check specifically for several architectures: */
- #if ((defined(i386) || defined(__x86_64__)) && !defined(__OPEN64__))
++#if ((defined(__i386__) || defined(__x86_64__)) && !defined(__OPEN64__))
+/* first x86: */
+#include "atomic/gcc_x86.h"
+
+#elif (defined(__ia64__))
+/* then ia64: */
+#include "atomic/gcc_ia64.h"
+
+/* for now we use gcc intrinsics on gcc: */
+/*#elif (defined(__powerpc__) || (defined(__ppc__)) )*/
+/*#include "atomic/gcc_ppc.h"*/
+
+#elif defined(__FUJITSU) && ( defined(__sparc_v9__) || defined (__sparcv9) )
+
+/* Fujitsu FX10 SPARC compiler */
+#include "atomic/fujitsu_sparc64.h"
+
+#else
+/* otherwise, there's a generic gcc intrinsics version: */
+#include "atomic/gcc.h"
+
+#endif /* end of check for gcc specific architectures */
+
+/* not gcc: */
+#elif (defined(_MSC_VER) && (_MSC_VER >= 1200) && \
+    (!defined(TMPI_TEST_NO_ATOMICS)) )
+
+/* Microsoft Visual C on x86, define taken from FFTW who got it from
+   Morten Nissov. icc on windows will take this path. */
+#include "atomic/msvc.h"
+
+#elif ( (defined(__IBM_GCC_ASM) || defined(__IBM_STDCPP_ASM)) && \
+    (defined(__powerpc__) || defined(__ppc__)) && \
+    (!defined(TMPI_TEST_NO_ATOMICS)) )
+
+/* PowerPC using xlC intrinsics.
+ */
+
+#include "atomic/xlc_ppc.h"
+
+#elif ( ( defined(__xlC__) || defined(__xlc__) ) && \
+    (!defined(TMPI_TEST_NO_ATOMICS)) )
+/* IBM xlC compiler */
+#include "atomic/xlc_ppc.h"
+
+
+#elif (defined (__sun) && (defined(__sparcv9) || defined(__sparc)) && \
+    (!defined(TMPI_TEST_NO_ATOMICS)) )
+/* Solaris on SPARC (Sun C Compiler, Solaris Studio) */
+#include "atomic/suncc-sparc.h"
+
+#elif defined(__FUJITSU) && defined(__sparc__)
+
+/* Fujitsu FX10 SPARC compiler requires gcc compatibility with -Xg */
+#error Atomics support for Fujitsu FX10 compiler requires -Xg (gcc compatibility)
+
+#elif defined(_CRAYC)
+
+/* Cray compiler */
+#include "atomic/cce.h"
+#else
+
+#ifndef DOXYGEN
+/** Indicates that no support for atomic operations is present. */
+#define TMPI_NO_ATOMICS
+#endif
+
+#endif /* platform-specific checks */
+
+#endif /* TMPI_NO_ATOMICS */
+
+#ifdef TMPI_NO_ATOMICS
+
+/* No atomic operations, use mutex fallback. Documentation is in x86 section */
+
+#ifdef TMPI_CHECK_ATOMICS
+#error No atomic operations implemented for this cpu/compiler combination.
+#endif
+
+
+/** Memory barrier operation
+
+    Modern CPUs rely heavily on out-of-order execution, and one common feature
+    is that load/stores might be reordered. Also, when using inline assembly
+    the compiler might already have loaded the variable we are changing into
+    a register, so any update to memory won't be visible.
+
+    This command creates a memory barrier, i.e. all memory results before
+    it in the code should be visible to all memory operations after it - the
+    CPU cannot propagate load/stores across it.
+
+    This barrier is a full barrier: all load and store operations of
+    instructions before it are completed, while all load and store operations
+    that are in instructions after it won't be done before this barrier.
+
+    \hideinitializer
+ */
+#define tMPI_Atomic_memory_barrier()
+
+/** Memory barrier operation with acquire semantics
+
+    This barrier is a barrier with acquire semantics: the terminology comes
+    from its common use after acquiring a lock: all load/store instructions
+    after this barrier may not be re-ordered to happen before this barrier.
+
+    \hideinitializer
+ */
+#define tMPI_Atomic_memory_barrier_acq()
+
+/** Memory barrier operation with release semantics
+
+    This barrier is a barrier with release semantics: the terminology comes
+    from its common use before releasing a lock: all load/store instructions
+    before this barrier may not be re-ordered to happen after this barrier.
+
+    \hideinitializer
+ */
+#define tMPI_Atomic_memory_barrier_rel()
+
+#ifndef DOXYGEN
+/* signal that they exist */
+#define TMPI_HAVE_ACQ_REL_BARRIERS
+#endif
+
+/** Atomic operations datatype
+ *
+ * Portable synchronization primitives like mutexes are effective for
+ * many purposes, but usually not very high performance.
+ * One of the problems is that you have the overhead of a function call,
+ * and another is that mutexes often have extra overhead to make the
+ * scheduling fair. Finally, if performance is important we don't want
+ * to suspend the thread if we cannot lock a mutex, but spin-lock at 100%
+ * CPU usage until the resource is available (e.g. increment a counter).
+ *
+ * These things can often be implemented with inline-assembly or other
+ * system-dependent functions, and we provide such functionality for the
+ * most common platforms. For portability we also have a fallback
+ * implementation using a mutex for locking.
+ *
+ * Performance-wise, the fastest solution is always to avoid locking
+ * completely (obvious, but remember it!). If you cannot do that, the
+ * next best thing is to use atomic operations that e.g. increment a
+ * counter without explicit locking. Spinlocks are useful to lock an
+ * entire region, but lead to more overhead and can be difficult to
+ * debug - it is up to you to make sure that only the thread owning the
+ * lock unlocks it!
+ *
+ * You should normally NOT use atomic operations for things like
+ * I/O threads. These should yield to other threads while waiting for
+ * the disk instead of spinning at 100% CPU usage.
+ *
+ * It is imperative that you use the provided routines for reading
+ * and writing, since some implementations require memory barriers before
+ * the CPU or memory sees an updated result. The structure contents are
+ * only visible here so it can be inlined for performance - it might
+ * change without further notice.
+ *
+ * \note No initialization is required for atomic variables.
+ *
+ * Currently, we have (real) atomic operations for:
+ *
+ * - gcc version 4.1 and later (all platforms)
+ * - x86 or x86_64, using GNU compilers
+ * - x86 or x86_64, using Intel compilers
+ * - x86 or x86_64, using Pathscale compilers
+ * - Itanium, using GNU compilers
+ * - Itanium, using Intel compilers
+ * - Itanium, using HP compilers
+ * - PowerPC, using GNU compilers
+ * - PowerPC, using IBM AIX compilers
+ * - PowerPC, using IBM compilers >=7.0 under Linux or Mac OS X.
+ * - Sparc64, using Fujitsu compilers.
+ *
+ * \see
+ * - tMPI_Atomic_get
+ * - tMPI_Atomic_set
+ * - tMPI_Atomic_cas
+ * - tMPI_Atomic_add_return
+ * - tMPI_Atomic_fetch_add
+ */
+typedef struct tMPI_Atomic
+{
+    int value; /**< The atomic value.*/
+}
+tMPI_Atomic_t;
+
+
+/** Atomic pointer type equivalent to tMPI_Atomic_t
+ *
+ * Useful for lock-free and wait-free data structures.
+ * The only operations available for this type are:
+ * \see
+ * - tMPI_Atomic_ptr_get
+ * - tMPI_Atomic_ptr_set
+ * - tMPI_Atomic_ptr_cas
+ */
+typedef struct tMPI_Atomic_ptr
+{
+    void *value; /**< The atomic pointer. */
+}
+tMPI_Atomic_ptr_t;
+
+
+/** Spinlock
+ *
+ * Spinlocks provide faster synchronization than mutexes,
+ * although they consume CPU-cycles while waiting. They are implemented
+ * with atomic operations and inline assembly whenever possible, and
+ * otherwise we use a fallback implementation where a spinlock is identical
+ * to a mutex (this is one of the reasons why you have to initialize them).
+ *
+ * There are no guarantees whatsoever about fair scheduling or
+ * debugging if you make a mistake and unlock a variable somebody
+ * else has locked - performance is the primary goal of spinlocks.
+ *
+ * \see
+ * - tMPI_Spinlock_init
+ * - tMPI_Spinlock_lock
+ * - tMPI_Spinlock_unlock
+ * - tMPI_Spinlock_trylock
+ * - tMPI_Spinlock_wait
+ */
+typedef struct tMPI_Spinlock *tMPI_Spinlock_t;
+
+/*! \def TMPI_SPINLOCK_INITIALIZER
+ * \brief Spinlock static initializer
+ *
+ * This is used for static spinlock initialization, and has the same
+ * properties as TMPI_THREAD_MUTEX_INITIALIZER has for mutexes.
+ * This is only for inlining in the tMPI_Thread.h header file. Whether
+ * it is 0, 1, or something else when unlocked depends on the platform.
+ * Don't assume anything about it. It might even be a mutex when using the
+ * fallback implementation!
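+ *
+ * A minimal usage sketch (the variable name my_lock is hypothetical):
+ *
+ *     static tMPI_Spinlock_t my_lock = TMPI_SPINLOCK_INITIALIZER;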
+ *
+ * \hideinitializer
+ */
+#define TMPI_SPINLOCK_INITIALIZER { NULL }
+
+/* Since mutexes guarantee memory barriers this works fine */
+/** Return value of an atomic integer
+ *
+ * Also implements proper memory barriers when necessary.
+ * The actual implementation is system-dependent.
+ *
+ * \param a Atomic variable to read
+ * \return Integer value of the atomic variable
+ *
+ * \hideinitializer
+ */
+TMPI_EXPORT
+int tMPI_Atomic_get(const tMPI_Atomic_t *a);
+
+/** Write value to an atomic integer
+ *
+ * Also implements proper memory barriers when necessary.
+ * The actual implementation is system-dependent.
+ *
+ * \param a Atomic variable
+ * \param i Integer to set the atomic variable to.
+ *
+ * \hideinitializer
+ */
+TMPI_EXPORT
+void tMPI_Atomic_set(tMPI_Atomic_t *a, int i);
+
+
+/** Return value of an atomic pointer
+ *
+ * Also implements proper memory barriers when necessary.
+ * The actual implementation is system-dependent.
+ *
+ * \param a Atomic variable to read
+ * \return Pointer value of the atomic variable
+ *
+ * \hideinitializer
+ */
+TMPI_EXPORT
+void* tMPI_Atomic_ptr_get(const tMPI_Atomic_ptr_t *a);
+
+
+
+
+/** Write value to an atomic pointer
+ *
+ * Also implements proper memory barriers when necessary.
+ * The actual implementation is system-dependent.
+ *
+ * \param a Atomic variable
+ * \param p Pointer value to set the atomic variable to.
+ *
+ * \hideinitializer
+ */
+TMPI_EXPORT
+void tMPI_Atomic_ptr_set(tMPI_Atomic_ptr_t *a, void *p);
+
+/** Add integer to atomic variable
+ *
+ * Also implements proper memory barriers when necessary.
+ * The actual implementation is system-dependent.
+ *
+ * \param a atomic datatype to modify
+ * \param i integer to increment with. Use i<0 to subtract atomically.
+ *
+ * \return The new value (after summation).
+ */
+TMPI_EXPORT
+int tMPI_Atomic_add_return(tMPI_Atomic_t *a, int i);
+#ifndef DOXYGEN
+#define TMPI_ATOMIC_HAVE_NATIVE_ADD_RETURN
+#endif
+
+
+
+/** Add to variable, return the old value.
+ *
+ * This operation is quite useful for synchronization counters.
+ * By performing a fetch_add with N, a thread can e.g. reserve a chunk
+ * with the next N iterations, and the return value is the index
+ * of the first element to treat.
+ *
+ * Also implements proper memory barriers when necessary.
+ * The actual implementation is system-dependent.
+ *
+ * \param a atomic datatype to modify
+ * \param i integer to increment with. Use i<0 to subtract atomically.
+ *
+ * \return The value of the atomic variable before addition.
+ */
+TMPI_EXPORT
+int tMPI_Atomic_fetch_add(tMPI_Atomic_t *a, int i);
+#ifndef DOXYGEN
+#define TMPI_ATOMIC_HAVE_NATIVE_FETCH_ADD
+#endif
+
+
+
+/** Atomic compare-and-swap operation
+ *
+ * The \a old value is compared with the memory value in the atomic datatype.
+ * If they are identical, the atomic type is swapped with the new value,
+ * and otherwise left unchanged.
+ *
+ * This is *the* synchronization primitive: it has a consensus number of
+ * infinity, and is available in some form on all modern CPU architectures.
+ * In the words of Herlihy & Shavit (The Art of Multiprocessor Programming),
+ * it is the 'king of all wild things'.
+ *
+ * In practice, use it as follows: You can start by reading a value
+ * (without locking anything), perform some calculations, and then
+ * atomically try to update it in memory unless it has changed. If it has
+ * changed you will get an error return code - reread the new value
+ * and repeat the calculations in that case.
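+ *
+ * A minimal sketch of that retry loop (calculate_new() is a hypothetical
+ * helper standing in for your own computation):
+ *
+ *     int old_val, new_val;
+ *     do
+ *     {
+ *         old_val = tMPI_Atomic_get(a);
+ *         new_val = calculate_new(old_val);
+ *     }
+ *     while (!tMPI_Atomic_cas(a, old_val, new_val));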
+ *
+ * \param a Atomic datatype ('memory' value)
+ * \param old_val Integer value read from the atomic type at an earlier point
+ * \param new_val New value to write to the atomic type if it currently is
+ *                identical to the old value.
+ *
+ * \return True (1) if the swap occurred: i.e. if the value in a was equal
+ *         to old_val. False (0) if the swap didn't occur and the value
+ *         was not equal to old_val.
+ *
+ * \note The exchange occurred if the return value is true (1).
+ */
+TMPI_EXPORT
+int tMPI_Atomic_cas(tMPI_Atomic_t *a, int old_val, int new_val);
+
+
+
+
+/** Atomic pointer compare-and-swap operation
+ *
+ * The \a old value is compared with the memory value in the atomic datatype.
+ * If they are identical, the atomic type is swapped with the new value,
+ * and otherwise left unchanged.
+ *
+ * This is essential for implementing wait-free lists and other data
+ * structures. See 'tMPI_Atomic_cas()'.
+ *
+ * \param a Atomic datatype ('memory' value)
+ * \param old_val Pointer value read from the atomic type at an earlier point
+ * \param new_val New value to write to the atomic type if it currently is
+ *                identical to the old value.
+ *
+ * \return True (1) if the swap occurred: i.e. if the value in a was equal
+ *         to old_val. False (0) if the swap didn't occur and the value
+ *         was not equal to old_val.
+ *
+ * \note The exchange occurred if the return value is true (1).
+ */
+TMPI_EXPORT
+int tMPI_Atomic_ptr_cas(tMPI_Atomic_ptr_t * a, void *old_val,
+                        void *new_val);
+
+/** Atomic swap operation.
+
+    Atomically swaps the data in the tMPI_Atomic_t operand with the value of b.
+    Note: This has no good assembly counterparts on many architectures, so
+    it might not be faster than a repeated CAS.
+
+    \param a  Pointer to atomic type
+    \param b  Value to swap
+    \return   the original value of a
+ */
+TMPI_EXPORT
+int tMPI_Atomic_swap(tMPI_Atomic_t *a, int b);
+
+/** Atomic swap pointer operation.
+
+    Atomically swaps the pointer in the tMPI_Atomic_ptr_t operand with the
+    value of b.
+    Note: This has no good assembly counterparts on many architectures, so
+    it might not be faster than a repeated CAS.
+
+    \param a  Pointer to atomic type
+    \param b  Value to swap
+    \return   the original value of a
+ */
+TMPI_EXPORT
+void *tMPI_Atomic_ptr_swap(tMPI_Atomic_ptr_t *a, void *b);
+#ifndef DOXYGEN
+#define TMPI_ATOMIC_HAVE_NATIVE_SWAP
+#endif
+
+
+/** Initialize spinlock
+ *
+ * In theory you can call this from multiple threads, but remember
+ * that we don't check for errors. If the first thread proceeded to
+ * lock the spinlock after initialization, the second will happily
+ * overwrite the contents and unlock it without warning you.
+ *
+ * \param x Spinlock pointer.
+ *
+ * \hideinitializer
+ */
+TMPI_EXPORT
+void tMPI_Spinlock_init( tMPI_Spinlock_t *x);
+#ifndef DOXYGEN
+#define TMPI_ATOMIC_HAVE_NATIVE_SPINLOCK
+#endif
+
+/** Acquire spinlock
+ *
+ * This routine blocks until the spinlock is available, and
+ * then locks it before returning.
+ *
+ * \param x Spinlock pointer
+ */
+TMPI_EXPORT
+void tMPI_Spinlock_lock( tMPI_Spinlock_t *x);
+
+
+/** Attempt to acquire spinlock
+ *
+ * This routine acquires the spinlock if possible, but if it is
+ * already locked it returns an error code immediately.
+ *
+ * \param x Spinlock pointer
+ *
+ * \return 0 if the spinlock was available so we could lock it,
+ *         otherwise a non-zero integer (1) if the lock is busy.
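+ *
+ * A sketch of non-blocking use (do_other_work() is a hypothetical
+ * placeholder for useful work done while the lock is busy):
+ *
+ *     while (tMPI_Spinlock_trylock(x) != 0)
+ *     {
+ *         do_other_work();
+ *     }
+ *     ... critical section ...
+ *     tMPI_Spinlock_unlock(x);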
+ */ +TMPI_EXPORT +int tMPI_Spinlock_trylock( tMPI_Spinlock_t *x); + +/** Release spinlock + * + * \param x Spinlock pointer + * + * Unlocks the spinlock, regardless if which thread locked it. + */ +TMPI_EXPORT +void tMPI_Spinlock_unlock( tMPI_Spinlock_t *x); + + + +/** Check if spinlock is locked + * + * This routine returns immediately with the lock status. + * + * \param x Spinlock pointer + * + * \return 1 if the spinlock is locked, 0 otherwise. + */ +TMPI_EXPORT +int tMPI_Spinlock_islocked( tMPI_Spinlock_t *x); + +/** Wait for a spinlock to become available + * + * This routine blocks until the spinlock is unlocked, + * but in contrast to tMPI_Spinlock_lock() it returns without + * trying to lock the spinlock. + * + * \param x Spinlock pointer + */ +TMPI_EXPORT +void tMPI_Spinlock_wait(tMPI_Spinlock_t *x); + + +#endif /* TMPI_NO_ATOMICS */ + +/* now define all the atomics that are not avaible natively. These + are done on the assumption that a native CAS does exist. */ +#include "atomic/derived.h" + +/* this allows us to use the inline keyword without breaking support for + some compilers that don't support it: */ +#ifdef inline_defined_in_atomic +#undef inline +#endif + +#if !defined(TMPI_NO_ATOMICS) && !defined(TMPI_ATOMICS) +/* Set it here to make sure the user code can check this without having to have + a config.h */ +/** Indicates that support for atomic operations is present. */ +#define TMPI_ATOMICS +#endif + + +#ifdef __cplusplus +} +#endif + + +#endif /* TMPI_ATOMIC_H_ */ diff --cc src/gromacs/gmxana/gmx_current.c index ec1a988f5d,0000000000..94b6d94db2 mode 100644,000000..100644 --- a/src/gromacs/gmxana/gmx_current.c +++ b/src/gromacs/gmxana/gmx_current.c @@@ -1,1008 -1,0 +1,1008 @@@ +/* + * This file is part of the GROMACS molecular simulation package. + * + * Copyright (c) 2008,2009,2010,2011,2012,2013,2014, by the GROMACS development team, led by + * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl, + * and including many others, as listed in the AUTHORS file in the + * top-level source directory and at http://www.gromacs.org. + * + * GROMACS is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation; either version 2.1 + * of the License, or (at your option) any later version. + * + * GROMACS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with GROMACS; if not, see + * http://www.gnu.org/licenses, or write to the Free Software Foundation, + * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * + * If you want to redistribute modifications to GROMACS, please + * consider that scientific software is very special. Version + * control is crucial - bugs must be traceable. We will be happy to + * consider code for inclusion in the official distribution, but + * derived work must not be called official GROMACS. Details are found + * in the README & COPYING files - if they are missing, get the + * official version at http://www.gromacs.org. + * + * To help us fund GROMACS development, we humbly ask that you cite + * the research papers on the package. Check out http://www.gromacs.org. 
+ */ +#ifdef HAVE_CONFIG_H +#include +#endif + +#include "gromacs/commandline/pargs.h" +#include "typedefs.h" +#include "gromacs/utility/smalloc.h" +#include "vec.h" +#include "gromacs/fileio/tpxio.h" +#include "gromacs/fileio/trxio.h" +#include "xvgr.h" +#include "rmpbc.h" +#include "pbc.h" +#include "physics.h" +#include "index.h" +#include "gromacs/statistics/statistics.h" +#include "gmx_ana.h" +#include "macros.h" + +#include "gromacs/legacyheaders/gmx_fatal.h" + +#define SQR(x) (pow(x, 2.0)) +#define EPSI0 (EPSILON0*E_CHARGE*E_CHARGE*AVOGADRO/(KILO*NANO)) /* EPSILON0 in SI units */ + +static void index_atom2mol(int *n, int *index, t_block *mols) +{ + int nat, i, nmol, mol, j; + + nat = *n; + i = 0; + nmol = 0; + mol = 0; + while (i < nat) + { + while (index[i] > mols->index[mol]) + { + mol++; + if (mol >= mols->nr) + { + gmx_fatal(FARGS, "Atom index out of range: %d", index[i]+1); + } + } + for (j = mols->index[mol]; j < mols->index[mol+1]; j++) + { + if (i >= nat || index[i] != j) + { + gmx_fatal(FARGS, "The index group does not consist of whole molecules"); + } + i++; + } + index[nmol++] = mol; + } + + fprintf(stderr, "\nSplit group of %d atoms into %d molecules\n", nat, nmol); + + *n = nmol; +} + +static gmx_bool precalc(t_topology top, real mass2[], real qmol[]) +{ + + real mtot; + real qtot; + real qall; + int i, j, k, l; + int ai, ci; + gmx_bool bNEU; + ai = 0; + ci = 0; + qall = 0.0; + + + + for (i = 0; i < top.mols.nr; i++) + { + k = top.mols.index[i]; + l = top.mols.index[i+1]; + mtot = 0.0; + qtot = 0.0; + + for (j = k; j < l; j++) + { + mtot += top.atoms.atom[j].m; + qtot += top.atoms.atom[j].q; + } + + for (j = k; j < l; j++) + { + top.atoms.atom[j].q -= top.atoms.atom[j].m*qtot/mtot; + mass2[j] = top.atoms.atom[j].m/mtot; + qmol[j] = qtot; + } + + + qall += qtot; + + if (qtot < 0.0) + { + ai++; + } + if (qtot > 0.0) + { + ci++; + } + } + + if (fabs(qall) > 0.01) + { + printf("\n\nSystem not neutral (q=%f) will not calculate translational part of the dipole moment.\n", qall); + bNEU = FALSE; + } + else + { + bNEU = TRUE; + } + + return bNEU; + +} + +static void remove_jump(matrix box, int natoms, rvec xp[], rvec x[]) +{ + + rvec hbox; + int d, i, m; + + for (d = 0; d < DIM; d++) + { + hbox[d] = 0.5*box[d][d]; + } + for (i = 0; i < natoms; i++) + { + for (m = DIM-1; m >= 0; m--) + { + while (x[i][m]-xp[i][m] <= -hbox[m]) + { + for (d = 0; d <= m; d++) + { + x[i][d] += box[m][d]; + } + } + while (x[i][m]-xp[i][m] > hbox[m]) + { + for (d = 0; d <= m; d++) + { + x[i][d] -= box[m][d]; + } + } + } + } +} + +static void calc_mj(t_topology top, int ePBC, matrix box, gmx_bool bNoJump, int isize, int index0[], \ + rvec fr[], rvec mj, real mass2[], real qmol[]) +{ + + int i, j, k, l; + rvec tmp; + rvec center; + rvec mt1, mt2; + t_pbc pbc; + + + calc_box_center(ecenterRECT, box, center); + + if (!bNoJump) + { + set_pbc(&pbc, ePBC, box); + } + + clear_rvec(tmp); + + + for (i = 0; i < isize; i++) + { + clear_rvec(mt1); + clear_rvec(mt2); + k = top.mols.index[index0[i]]; + l = top.mols.index[index0[i+1]]; + for (j = k; j < l; j++) + { + svmul(mass2[j], fr[j], tmp); + rvec_inc(mt1, tmp); + } + + if (bNoJump) + { + svmul(qmol[k], mt1, mt1); + } + else + { + pbc_dx(&pbc, mt1, center, mt2); + svmul(qmol[k], mt2, mt1); + } + + rvec_inc(mj, mt1); + + } + +} + + +static real calceps(real prefactor, real md2, real mj2, real cor, real eps_rf, gmx_bool bCOR) +{ + + /* bCOR determines if the correlation is computed via + * static properties (FALSE) or the correlation integral (TRUE) + */ + + real 
epsilon = 0.0; + + + if (bCOR) + { + epsilon = md2-2.0*cor+mj2; + } + else + { + epsilon = md2+mj2+2.0*cor; + } + + if (eps_rf == 0.0) + { + epsilon = 1.0+prefactor*epsilon; + + } + else + { + epsilon = 2.0*eps_rf+1.0+2.0*eps_rf*prefactor*epsilon; + epsilon /= 2.0*eps_rf+1.0-prefactor*epsilon; + } + + + return epsilon; + +} + + +static real calc_cacf(FILE *fcacf, real prefactor, real cacf[], real time[], int nfr, int vfr[], int ei, int nshift) +{ + + int i; + real corint; + real deltat = 0.0; + real rfr; + real sigma = 0.0; + real sigma_ret = 0.0; + corint = 0.0; + + if (nfr > 1) + { + i = 0; + + while (i < nfr) + { + + rfr = (real) (nfr/nshift-i/nshift); + cacf[i] /= rfr; + + if (time[vfr[i]] <= time[vfr[ei]]) + { + sigma_ret = sigma; + } + + fprintf(fcacf, "%.3f\t%10.6g\t%10.6g\n", time[vfr[i]], cacf[i], sigma); + + if ((i+1) < nfr) + { + deltat = (time[vfr[i+1]]-time[vfr[i]]); + } + corint = 2.0*deltat*cacf[i]*prefactor; + if (i == 0 || (i+1) == nfr) + { + corint *= 0.5; + } + sigma += corint; + + i++; + } + + } + else + { + printf("Too less points.\n"); + } + + return sigma_ret; + +} + +static void calc_mjdsp(FILE *fmjdsp, real prefactor, real dsp2[], real time[], int nfr, real refr[]) +{ + + int i; + real rtmp; + real rfr; + + + fprintf(fmjdsp, "#Prefactor fit E-H: 1 / 6.0*V*k_B*T: %g\n", prefactor); + + + + for (i = 0; i < nfr; i++) + { + + if (refr[i] != 0.0) + { + dsp2[i] *= prefactor/refr[i]; + fprintf(fmjdsp, "%.3f\t%10.6g\n", time[i], dsp2[i]); + } + + + } + +} + + +static void dielectric(FILE *fmj, FILE *fmd, FILE *outf, FILE *fcur, FILE *mcor, + FILE *fmjdsp, gmx_bool bNoJump, gmx_bool bACF, gmx_bool bINT, + int ePBC, t_topology top, t_trxframe fr, real temp, + real trust, real bfit, real efit, real bvit, real evit, + t_trxstatus *status, int isize, int nmols, int nshift, + atom_id *index0, int indexm[], real mass2[], + real qmol[], real eps_rf, const output_env_t oenv) +{ + int i, j, k, l, f; + int valloc, nalloc, nfr, nvfr, m, itrust = 0; + int vshfr; + real *xshfr = NULL; + int *vfr = NULL; + real refr = 0.0; + real rfr = 0.0; + real *cacf = NULL; + real *time = NULL; + real *djc = NULL; + real corint = 0.0; + real prefactorav = 0.0; + real prefactor = 0.0; + real volume; + real volume_av = 0.0; + real dk_s, dk_d, dk_f; + real dm_s, dm_d; + real mj = 0.0; + real mj2 = 0.0; + real mjd = 0.0; + real mjdav = 0.0; + real md2 = 0.0; + real mdav2 = 0.0; + real sgk; + rvec mja_tmp; + rvec mjd_tmp; + rvec mdvec; + rvec *mu = NULL; + rvec *xp = NULL; + rvec *v0 = NULL; + rvec *mjdsp = NULL; + real *dsp2 = NULL; + real t0; + real rtmp; + real qtmp; + + + + rvec tmp; + rvec *mtrans = NULL; + + /* + * Variables for the least-squares fit for Einstein-Helfand and Green-Kubo + */ + + int bi, ei, ie, ii; + real rest = 0.0; + real sigma = 0.0; + real malt = 0.0; + real err = 0.0; + real *xfit; + real *yfit; + gmx_rmpbc_t gpbc = NULL; + + /* + * indices for EH + */ + + ei = 0; + bi = 0; + + /* + * indices for GK + */ + + ii = 0; + ie = 0; + t0 = 0; + sgk = 0.0; + + + /* This is the main loop over frames */ + + + nfr = 0; + + + nvfr = 0; + vshfr = 0; + nalloc = 0; + valloc = 0; + + clear_rvec(mja_tmp); + clear_rvec(mjd_tmp); + clear_rvec(mdvec); + clear_rvec(tmp); + gpbc = gmx_rmpbc_init(&top.idef, ePBC, fr.natoms); + + do + { + + refr = (real)(nfr+1); + + if (nfr >= nalloc) + { + nalloc += 100; + srenew(time, nalloc); + srenew(mu, nalloc); + srenew(mjdsp, nalloc); + srenew(dsp2, nalloc); + srenew(mtrans, nalloc); + srenew(xshfr, nalloc); + + for (i = nfr; i < nalloc; i++) + { + 
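+                /* zero the newly grown tail entries before they are accumulated into */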
+                clear_rvec(mjdsp[i]);
+                clear_rvec(mu[i]);
+                clear_rvec(mtrans[i]);
+                dsp2[i]  = 0.0;
+                xshfr[i] = 0.0;
+            }
+        }
+
+
+
+        if (nfr == 0)
+        {
+            t0 = fr.time;
+
+        }
+
+        time[nfr] = fr.time-t0;
+
+        if (time[nfr] <= bfit)
+        {
+            bi = nfr;
+        }
+        if (time[nfr] <= efit)
+        {
+            ei = nfr;
+        }
+
+        if (bNoJump)
+        {
+
+            if (xp)
+            {
+                remove_jump(fr.box, fr.natoms, xp, fr.x);
+            }
+            else
+            {
+                snew(xp, fr.natoms);
+            }
+
+            for (i = 0; i < fr.natoms; i++)
+            {
+                copy_rvec(fr.x[i], xp[i]);
+            }
+
+        }
+
+        gmx_rmpbc_trxfr(gpbc, &fr);
+
+        calc_mj(top, ePBC, fr.box, bNoJump, nmols, indexm, fr.x, mtrans[nfr], mass2, qmol);
+
+        for (i = 0; i < isize; i++)
+        {
+            j = index0[i];
+            svmul(top.atoms.atom[j].q, fr.x[j], fr.x[j]);
+            rvec_inc(mu[nfr], fr.x[j]);
+        }
+
+        /*if(mod(nfr,nshift)==0){*/
+        if (nfr%nshift == 0)
+        {
+            for (j = nfr; j >= 0; j--)
+            {
+                rvec_sub(mtrans[nfr], mtrans[j], tmp);
+                dsp2[nfr-j]  += norm2(tmp);
+                xshfr[nfr-j] += 1.0;
+            }
+        }
+
+        if (fr.bV)
+        {
+            if (nvfr >= valloc)
+            {
+                valloc += 100;
+                srenew(vfr, valloc);
+                if (bINT)
+                {
+                    srenew(djc, valloc);
+                }
+                srenew(v0, valloc);
+                if (bACF)
+                {
+                    srenew(cacf, valloc);
+                }
+            }
+            if (time[nfr] <= bvit)
+            {
+                ii = nvfr;
+            }
+            if (time[nfr] <= evit)
+            {
+                ie = nvfr;
+            }
+            vfr[nvfr] = nfr;
+            clear_rvec(v0[nvfr]);
+            if (bACF)
+            {
+                cacf[nvfr] = 0.0;
+            }
+            if (bINT)
+            {
+                djc[nvfr] = 0.0;
+            }
+            for (i = 0; i < isize; i++)
+            {
+                j = index0[i];
+                svmul(mass2[j], fr.v[j], fr.v[j]);
+                svmul(qmol[j], fr.v[j], fr.v[j]);
+                rvec_inc(v0[nvfr], fr.v[j]);
+            }
+
+            fprintf(fcur, "%.3f\t%.6f\t%.6f\t%.6f\n", time[nfr], v0[nvfr][XX], v0[nvfr][YY], v0[nvfr][ZZ]);
+            if (bACF || bINT)
+            {
+                /*if(mod(nvfr,nshift)==0){*/
+                if (nvfr%nshift == 0)
+                {
+                    for (j = nvfr; j >= 0; j--)
+                    {
+                        if (bACF)
+                        {
+                            cacf[nvfr-j] += iprod(v0[nvfr], v0[j]);
+                        }
+                        if (bINT)
+                        {
+                            djc[nvfr-j] += iprod(mu[vfr[j]], v0[nvfr]);
+                        }
+                    }
+                    vshfr++;
+                }
+            }
+            nvfr++;
+        }
+
+        volume     = det(fr.box);
+        volume_av += volume;
+
+        rvec_inc(mja_tmp, mtrans[nfr]);
+        mjd += iprod(mu[nfr], mtrans[nfr]);
+        rvec_inc(mdvec, mu[nfr]);
+
+        mj2 += iprod(mtrans[nfr], mtrans[nfr]);
+        md2 += iprod(mu[nfr], mu[nfr]);
+
+        fprintf(fmj, "%.3f\t%8.5f\t%8.5f\t%8.5f\t%8.5f\t%8.5f\n", time[nfr], mtrans[nfr][XX], mtrans[nfr][YY], mtrans[nfr][ZZ], mj2/refr, norm(mja_tmp)/refr);
+        fprintf(fmd, "%.3f\t%8.5f\t%8.5f\t%8.5f\t%8.5f\t%8.5f\n", \
+                time[nfr], mu[nfr][XX], mu[nfr][YY], mu[nfr][ZZ], md2/refr, norm(mdvec)/refr);
+
+        nfr++;
+
+    }
+    while (read_next_frame(oenv, status, &fr));
+
+    gmx_rmpbc_done(gpbc);
+
+    volume_av /= refr;
+
+    prefactor  = 1.0;
+    prefactor /= 3.0*EPSILON0*volume_av*BOLTZ*temp;
+
+
+    prefactorav  = E_CHARGE*E_CHARGE;
+    prefactorav /= volume_av*BOLTZMANN*temp*NANO*6.0;
+
+    fprintf(stderr, "Prefactor fit E-H: 1 / 6.0*V*k_B*T: %g\n", prefactorav);
+
+    calc_mjdsp(fmjdsp, prefactorav, dsp2, time, nfr, xshfr);
+
+    /*
+     * Now we can average and calculate the correlation functions
+     */
+
+
+    mj2 /= refr;
+    mjd /= refr;
+    md2 /= refr;
+
+    svmul(1.0/refr, mdvec, mdvec);
+    svmul(1.0/refr, mja_tmp, mja_tmp);
+
+    mdav2 = norm2(mdvec);
+    mj    = norm2(mja_tmp);
+    mjdav = iprod(mdvec, mja_tmp);
+
+
+    printf("\n\nAverage translational dipole moment M_J [enm] after %d frames (|M|^2): %f %f %f (%f)\n", nfr, mja_tmp[XX], mja_tmp[YY], mja_tmp[ZZ], mj2);
+    printf("\n\nAverage molecular dipole moment M_D [enm] after %d frames (|M|^2): %f %f %f (%f)\n", nfr, mdvec[XX], mdvec[YY], mdvec[ZZ], md2);
+
+    if (v0 != NULL)
+    {
+        trust  *= (real) nvfr;
+        itrust  = (int) trust;
+
+        if (bINT)
+        {
+
+            printf("\nCalculating M_D - current correlation integral ... \n");
+            corint = calc_cacf(mcor, prefactorav/EPSI0, djc, time, nvfr, vfr, ie, nshift);
+
+        }
+
+        if (bACF)
+        {
+
+            printf("\nCalculating current autocorrelation ... \n");
+            sgk = calc_cacf(outf, prefactorav/PICO, cacf, time, nvfr, vfr, ie, nshift);
+
+            if (ie > ii)
+            {
+
+                snew(xfit, ie-ii+1);
+                snew(yfit, ie-ii+1);
+
+                for (i = ii; i <= ie; i++)
+                {
+
+                    xfit[i-ii] = log(time[vfr[i]]);
+                    rtmp       = fabs(cacf[i]);
+                    yfit[i-ii] = log(rtmp);
+
+                }
+
+                lsq_y_ax_b(ie-ii, xfit, yfit, &sigma, &malt, &err, &rest);
+
+                malt = exp(malt);
+
+                sigma += 1.0;
+
+                malt *= prefactorav*2.0e12/sigma;
+
+                sfree(xfit);
+                sfree(yfit);
+
+            }
+        }
+    }
+
+
+    /* Calculation of the dielectric constant */
+
+    fprintf(stderr, "\n********************************************\n");
+    dk_s = calceps(prefactor, md2, mj2, mjd, eps_rf, FALSE);
+    fprintf(stderr, "\nAbsolute values:\n epsilon=%f\n", dk_s);
+    fprintf(stderr, " <M_D^2>, <M_J^2>, <(M_J*M_D)^2>:  (%f, %f, %f)\n\n", md2, mj2, mjd);
+    fprintf(stderr, "********************************************\n");
+
+
+    dk_f = calceps(prefactor, md2-mdav2, mj2-mj, mjd-mjdav, eps_rf, FALSE);
+    fprintf(stderr, "\n\nFluctuations:\n epsilon=%f\n\n", dk_f);
+    fprintf(stderr, "\n deltaM_D , deltaM_J, deltaM_JD:  (%f, %f, %f)\n", md2-mdav2, mj2-mj, mjd-mjdav);
+    fprintf(stderr, "\n********************************************\n");
+    if (bINT)
+    {
+        dk_d = calceps(prefactor, md2-mdav2, mj2-mj, corint, eps_rf, TRUE);
+        fprintf(stderr, "\nStatic dielectric constant using integral and fluctuations: %f\n", dk_d);
+        fprintf(stderr, "\n < M_JM_D > via integral:  %.3f\n", -1.0*corint);
+    }
+
+    fprintf(stderr, "\n***************************************************");
+    fprintf(stderr, "\n\nAverage volume V=%f nm^3 at T=%f K\n", volume_av, temp);
+    fprintf(stderr, "and corresponding prefactor 1.0 / 3.0*V*k_B*T*EPSILON_0:  %f \n", prefactor);
+
+
- if (bACF)
++ if (bACF && (ii < nvfr))
+    {
+        fprintf(stderr, "Integral and integrated fit to the current acf yields at t=%f:\n", time[vfr[ii]]);
+        fprintf(stderr, "sigma=%8.3f (pure integral: %.3f)\n", sgk-malt*pow(time[vfr[ii]], sigma), sgk);
+    }
+
+    if (ei > bi)
+    {
+        fprintf(stderr, "\nStart fit at %f ps (%f).\n", time[bi], bfit);
+        fprintf(stderr, "End fit at %f ps (%f).\n\n", time[ei], efit);
+
+        snew(xfit, ei-bi+1);
+        snew(yfit, ei-bi+1);
+
+        for (i = bi; i <= ei; i++)
+        {
+            xfit[i-bi] = time[i];
+            yfit[i-bi] = dsp2[i];
+        }
+
+        lsq_y_ax_b(ei-bi, xfit, yfit, &sigma, &malt, &err, &rest);
+
+        sigma *= 1e12;
+        dk_d   = calceps(prefactor, md2, 0.5*malt/prefactorav, corint, eps_rf, TRUE);
+
+        fprintf(stderr, "Einstein-Helfand fit to the MSD of the translational dipole moment yields:\n\n");
+        fprintf(stderr, "sigma=%.4f\n", sigma);
+        fprintf(stderr, "translational fraction of M^2: %.4f\n", 0.5*malt/prefactorav);
+        fprintf(stderr, "Dielectric constant using EH: %.4f\n", dk_d);
+
+        sfree(xfit);
+        sfree(yfit);
+    }
+    else
+    {
- fprintf(stderr, "Too less points for a fit.\n");
++ fprintf(stderr, "Too few points for a fit.\n");
+    }
+
+
+    if (v0 != NULL)
+    {
+        sfree(v0);
+    }
+    if (bACF)
+    {
+        sfree(cacf);
+    }
+    if (bINT)
+    {
+        sfree(djc);
+    }
+
+    sfree(time);
+
+
+    sfree(mjdsp);
+    sfree(mu);
+}
+
+
+
+int gmx_current(int argc, char *argv[])
+{
+
+    static int      nshift  = 1000;
+    static real     temp    = 300.0;
+    static real     trust   = 0.25;
+    static real     eps_rf  = 0.0;
+    static gmx_bool bNoJump = TRUE;
+    static real     bfit    = 100.0;
+    static real     bvit    = 0.5;
+    static real     efit    = 400.0;
+    static real     evit    = 5.0;
+    t_pargs         pa[] = {
+        { "-sh", FALSE, etINT, {&nshift},
+          "Shift of the frames for averaging the correlation functions and the mean-square displacement."},
+        { "-nojump", FALSE, etBOOL, {&bNoJump},
+          "Removes jumps of atoms across the box."},
+        { "-eps", FALSE, etREAL, {&eps_rf},
+          "Dielectric constant of the surrounding medium. The value zero corresponds to infinity (tin-foil boundary conditions)."},
+        { "-bfit", FALSE, etREAL, {&bfit},
+          "Beginning of the fit of the straight line to the MSD of the translational fraction of the dipole moment."},
+        { "-efit", FALSE, etREAL, {&efit},
+          "End of the fit of the straight line to the MSD of the translational fraction of the dipole moment."},
+        { "-bvit", FALSE, etREAL, {&bvit},
+          "Beginning of the fit of the current autocorrelation function to a*t^b."},
+        { "-evit", FALSE, etREAL, {&evit},
+          "End of the fit of the current autocorrelation function to a*t^b."},
+        { "-tr", FALSE, etREAL, {&trust},
+          "Fraction of the trajectory taken into account for the integral."},
+        { "-temp", FALSE, etREAL, {&temp},
+          "Temperature for calculating epsilon."}
+    };
+
+    output_env_t oenv;
+    t_topology   top;
+    char         title[STRLEN];
+    char       **grpname = NULL;
+    const char  *indexfn;
+    t_trxframe   fr;
+    real        *mass2 = NULL;
+    rvec        *xtop, *vtop;
+    matrix       box;
+    atom_id     *index0;
+    int         *indexm = NULL;
+    int          isize;
+    t_trxstatus *status;
+    int          flags = 0;
+    gmx_bool     bTop;
+    gmx_bool     bNEU;
+    gmx_bool     bACF;
+    gmx_bool     bINT;
+    int          ePBC = -1;
+    int          natoms;
+    int          nmols;
+    int          i, j, k = 0, l;
+    int          step;
+    real         t;
+    real         lambda;
+    real        *qmol;
+    FILE        *outf   = NULL;
+    FILE        *outi   = NULL;
+    FILE        *tfile  = NULL;
+    FILE        *mcor   = NULL;
+    FILE        *fmj    = NULL;
+    FILE        *fmd    = NULL;
+    FILE        *fmjdsp = NULL;
+    FILE        *fcur   = NULL;
+    t_filenm     fnm[] = {
+        { efTPS, NULL, NULL, ffREAD },    /* this is for the topology */
+        { efNDX, NULL, NULL, ffOPTRD },
+        { efTRX, "-f", NULL, ffREAD },    /* and this for the trajectory */
+        { efXVG, "-o", "current.xvg", ffWRITE },
+        { efXVG, "-caf", "caf.xvg", ffOPTWR },
+        { efXVG, "-dsp", "dsp.xvg", ffWRITE },
+        { efXVG, "-md", "md.xvg", ffWRITE },
+        { efXVG, "-mj", "mj.xvg", ffWRITE},
+        { efXVG, "-mc", "mc.xvg", ffOPTWR }
+    };
+
+#define NFILE asize(fnm)
+
+
+    const char *desc[] = {
+        "[THISMODULE] is a tool for calculating the current autocorrelation function, the correlation",
+        "of the rotational and translational dipole moment of the system, and the resulting static",
+        "dielectric constant. To obtain a reasonable result, the index group has to be neutral.",
+        "Furthermore, the routine is capable of extracting the static conductivity from the current ",
+        "autocorrelation function, if velocities are given. Additionally, an Einstein-Helfand fit ",
+        "can be used to obtain the static conductivity.",
+        "[PAR]",
+        "The flag [TT]-caf[tt] is for the output of the current autocorrelation function and [TT]-mc[tt] writes the",
+        "correlation of the rotational and translational part of the dipole moment in the corresponding",
+        "file. However, this option is only available for trajectories containing velocities.",
+        "Options [TT]-sh[tt] and [TT]-tr[tt] are responsible for the averaging and integration of the",
+        "autocorrelation functions. Since averaging proceeds by shifting the starting point",
+        "through the trajectory, the shift can be modified with [TT]-sh[tt] to enable the choice of uncorrelated",
+        "starting points. Towards the end, statistical inaccuracy grows and integrating the",
+        "correlation function only yields reliable values until a certain point, depending on",
+        "the number of frames. The option [TT]-tr[tt] controls the region of the integral taken into account",
+        "for calculating the static dielectric constant.",
+        "[PAR]",
+        "Option [TT]-temp[tt] sets the temperature required for the computation of the static dielectric constant.",
+        "[PAR]",
+        "Option [TT]-eps[tt] controls the dielectric constant of the surrounding medium for simulations using",
+        "a Reaction Field or dipole corrections of the Ewald summation ([TT]-eps[tt]=0 corresponds to",
+        "tin-foil boundary conditions).",
+        "[PAR]",
+        "[TT]-[no]nojump[tt] unfolds the coordinates to allow free diffusion. This is required to get a continuous",
+        "translational dipole moment, needed for the Einstein-Helfand fit. The results from the fit allow",
+        "the determination of the dielectric constant for systems of charged molecules. However, it is also possible to extract",
+        "the dielectric constant from the fluctuations of the total dipole moment in folded coordinates. But this",
+        "option has to be used with care, since only very short time spans fulfill the approximation that the density",
+        "of the molecules is approximately constant and the averages are already converged. To be on the safe side,",
+        "the dielectric constant should be calculated with the help of the Einstein-Helfand method for",
+        "the translational part of the dielectric constant."
+    };
+
+
+    /* First the arguments are parsed and the system information is processed */
+    if (!parse_common_args(&argc, argv, PCA_CAN_TIME | PCA_CAN_VIEW,
+                           NFILE, fnm, asize(pa), pa, asize(desc), desc, 0, NULL, &oenv))
+    {
+        return 0;
+    }
+
+    bACF = opt2bSet("-caf", NFILE, fnm);
+    bINT = opt2bSet("-mc", NFILE, fnm);
+
+    bTop = read_tps_conf(ftp2fn(efTPS, NFILE, fnm), title, &top, &ePBC, &xtop, &vtop, box, TRUE);
+
+
+
+    sfree(xtop);
+    sfree(vtop);
+    indexfn = ftp2fn_null(efNDX, NFILE, fnm);
+    snew(grpname, 1);
+
+    get_index(&(top.atoms), indexfn, 1, &isize, &index0, grpname);
+
+    flags = flags | TRX_READ_X | TRX_READ_V;
+
+    read_first_frame(oenv, &status, ftp2fn(efTRX, NFILE, fnm), &fr, flags);
+
+    snew(mass2, top.atoms.nr);
+    snew(qmol, top.atoms.nr);
+
+    bNEU = precalc(top, mass2, qmol);
+
+
+    snew(indexm, isize);
+
+    for (i = 0; i < isize; i++)
+    {
+        indexm[i] = index0[i];
+    }
+
+    nmols = isize;
+
+
+    index_atom2mol(&nmols, indexm, &top.mols);
+
+    if (fr.bV)
+    {
+        if (bACF)
+        {
+            outf = xvgropen(opt2fn("-caf", NFILE, fnm),
+                            "Current autocorrelation function", output_env_get_xvgr_tlabel(oenv),
+                            "ACF (e nm/ps)\\S2", oenv);
+            fprintf(outf, "# time\t acf\t average \t std.dev\n");
+        }
+        fcur = xvgropen(opt2fn("-o", NFILE, fnm),
+                        "Current", output_env_get_xvgr_tlabel(oenv), "J(t) (e nm/ps)", oenv);
+        fprintf(fcur, "# time\t Jx\t Jy \t J_z \n");
+        if (bINT)
+        {
+            mcor = xvgropen(opt2fn("-mc", NFILE, fnm),
+                            "M\\sD\\N - current autocorrelation function",
+                            output_env_get_xvgr_tlabel(oenv),
+                            "< M\\sD\\N (0)\\c7\\CJ(t) > (e nm/ps)\\S2", oenv);
+            fprintf(mcor, "# time\t M_D(0) J(t) acf \t Integral acf\n");
+        }
+    }
+
+    fmj = xvgropen(opt2fn("-mj", NFILE, fnm),
+                   "Averaged translational part of M", output_env_get_xvgr_tlabel(oenv),
+                   "< M\\sJ\\N > (enm)", oenv);
+    fprintf(fmj, "# time\t x\t y \t z \t average of M_J^2 \t std.dev\n");
+    fmd = xvgropen(opt2fn("-md", NFILE, fnm),
+                   "Averaged rotational part of M", output_env_get_xvgr_tlabel(oenv),
+                   "< M\\sD\\N > (enm)", oenv);
+    fprintf(fmd, "# time\t x\t y \t z \t average of M_D^2 \t std.dev\n");
+
+    fmjdsp = xvgropen(opt2fn("-dsp", NFILE, fnm),
+                      "MSD of the squared translational dipole moment M",
output_env_get_xvgr_tlabel(oenv), + "<|M\\sJ\\N(t)-M\\sJ\\N(0)|\\S2\\N > / 6.0*V*k\\sB\\N*T / Sm\\S-1\\Nps\\S-1\\N", + oenv); + + + /* System information is read and prepared, dielectric() processes the frames + * and calculates the requested quantities */ + + dielectric(fmj, fmd, outf, fcur, mcor, fmjdsp, bNoJump, bACF, bINT, ePBC, top, fr, + temp, trust, bfit, efit, bvit, evit, status, isize, nmols, nshift, + index0, indexm, mass2, qmol, eps_rf, oenv); + + gmx_ffclose(fmj); + gmx_ffclose(fmd); + gmx_ffclose(fmjdsp); + if (bACF) + { + gmx_ffclose(outf); + } + if (bINT) + { + gmx_ffclose(mcor); + } + + return 0; +}